/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
#include "lib/eq.h"

/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)

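/* The fast path FDB tables are kept in a 3-dimensional cache indexed by
 * (chain, prio, level); each slot carries the flow table pointer and a
 * rule refcount, which the esw_get/put_prio_table() helpers below
 * maintain.
 */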
#define fdb_prio_table(esw, chain, prio, level) \
	(esw)->fdb_table.offloads.fdb_prio[(chain)][(prio)][(level)]

#define UPLINK_REP_INDEX 0

static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
						     u16 vport_num)
{
	int idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);

	WARN_ON(idx > esw->total_vports - 1);
	return &esw->offloads.vport_reps[idx];
}

static struct mlx5_flow_table *
esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
static void
esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);

bool mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw)
{
	return (!!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED));
}

u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw)
{
	if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
		return FDB_MAX_CHAIN;

	return 0;
}

u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw)
{
	if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
		return FDB_MAX_PRIO;

	return 1;
}

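/* Set the source vport match on a rule. Two schemes are used: when
 * per-vport metadata matching is enabled, packets are matched on the
 * vport metadata in reg_c_0; otherwise the rule matches on source_port
 * directly (plus the owning vhca id on merged-eswitch setups).
 */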
static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_esw_flow_attr *attr)
{
	void *misc2;
	void *misc;

	/* Use metadata matching because vport is not represented by single
	 * VHCA in dual-port RoCE mode, and matching on source vport may fail.
	 */
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(attr->in_mdev->priv.eswitch,
								   attr->in_rep->vport));

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET_TO_ONES(fte_match_set_misc2, misc2, metadata_reg_c_0);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		if (memchr_inv(misc, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc)))
			spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id,
				 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET_TO_ONES(fte_match_set_misc, misc,
					 source_eswitch_owner_vhca_id);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	}

	if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
	    attr->in_rep->vport == MLX5_VPORT_UPLINK)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
}

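/* Install an offloaded rule in the fast path FDB. Builds the
 * destination list (forward vports and/or a goto-chain table plus an
 * optional counter), applies emulated vlan push/pop and header rewrite
 * actions, and routes the rule through a termination table when one is
 * required.
 */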
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	bool split = !!(attr->split_count);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *fdb;
	int j, i = 0;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	flow_act.action = attr->action;
	/* if per flow vlan pop/push is emulated, don't set that into the firmware */
	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
				     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
		flow_act.vlan[0].vid = attr->vlan_vid[0];
		flow_act.vlan[0].prio = attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
			flow_act.vlan[1].vid = attr->vlan_vid[1];
			flow_act.vlan[1].prio = attr->vlan_prio[1];
		}
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		if (attr->dest_chain) {
			struct mlx5_flow_table *ft;

			ft = esw_get_prio_table(esw, attr->dest_chain, 1, 0);
			if (IS_ERR(ft)) {
				rule = ERR_CAST(ft);
				goto err_create_goto_table;
			}

			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[i].ft = ft;
			i++;
		} else {
			for (j = attr->split_count; j < attr->out_count; j++) {
				dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
				dest[i].vport.num = attr->dests[j].rep->vport;
				dest[i].vport.vhca_id =
					MLX5_CAP_GEN(attr->dests[j].mdev, vhca_id);
				if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
					dest[i].vport.flags |=
						MLX5_FLOW_DEST_VPORT_VHCA_ID;
				if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
					flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
					flow_act.reformat_id = attr->dests[j].encap_id;
					dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
					dest[i].vport.reformat_id =
						attr->dests[j].encap_id;
				}
				i++;
			}
		}
	}
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter_id = mlx5_fc_id(attr->counter);
		i++;
	}

	mlx5_eswitch_set_rule_source_port(esw, spec, attr);

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
		if (attr->tunnel_match_level != MLX5_MATCH_NONE)
			spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
		if (attr->match_level != MLX5_MATCH_NONE)
			spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
	} else if (attr->match_level != MLX5_MATCH_NONE) {
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_id = attr->mod_hdr_id;

	fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!split);
	if (IS_ERR(fdb)) {
		rule = ERR_CAST(fdb);
		goto err_esw_get;
	}

	if (mlx5_eswitch_termtbl_required(esw, &flow_act, spec))
		rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr,
						     &flow_act, dest, i);
	else
		rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		esw->offloads.num_flows++;

	return rule;

err_add_rule:
	esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
err_esw_get:
	if (attr->dest_chain)
		esw_put_prio_table(esw, attr->dest_chain, 1, 0);
err_create_goto_table:
	return rule;
}

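/* Install the first half of a split rule: a rule in the level 0 table
 * of (chain, prio) that forwards both to the first split_count vport
 * destinations and to the level 1 table of the same slot, where the
 * rest of the flow's actions are applied.
 */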
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *fast_fdb;
	struct mlx5_flow_table *fwd_fdb;
	struct mlx5_flow_handle *rule;
	int i;

	fast_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 0);
	if (IS_ERR(fast_fdb)) {
		rule = ERR_CAST(fast_fdb);
		goto err_get_fast;
	}

	fwd_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 1);
	if (IS_ERR(fwd_fdb)) {
		rule = ERR_CAST(fwd_fdb);
		goto err_get_fwd;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < attr->split_count; i++) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport.num = attr->dests[i].rep->vport;
		dest[i].vport.vhca_id =
			MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
		if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
			dest[i].vport.reformat_id = attr->dests[i].encap_id;
		}
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = fwd_fdb;
	i++;

	mlx5_eswitch_set_rule_source_port(esw, spec, attr);

	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	if (attr->match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);

	if (IS_ERR(rule))
		goto add_err;

	esw->offloads.num_flows++;

	return rule;
add_err:
	esw_put_prio_table(esw, attr->chain, attr->prio, 1);
err_get_fwd:
	esw_put_prio_table(esw, attr->chain, attr->prio, 0);
err_get_fast:
	return rule;
}

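/* Common rule teardown: delete the rule, release any termination
 * tables it referenced, and drop the prio table references taken at
 * add time (both levels for fwd rules; the rule's own level plus the
 * goto-chain table, when dest_chain was used, otherwise).
 */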
static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
			struct mlx5_flow_handle *rule,
			struct mlx5_esw_flow_attr *attr,
			bool fwd_rule)
{
	bool split = (attr->split_count > 0);
	int i;

	mlx5_del_flow_rules(rule);

	/* unref the term table */
	for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
		if (attr->dests[i].termtbl)
			mlx5_eswitch_termtbl_put(esw, attr->dests[i].termtbl);
	}

	esw->offloads.num_flows--;

	if (fwd_rule) {
		esw_put_prio_table(esw, attr->chain, attr->prio, 1);
		esw_put_prio_table(esw, attr->chain, attr->prio, 0);
	} else {
		esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
		if (attr->dest_chain)
			esw_put_prio_table(esw, attr->dest_chain, 1, 0);
	}
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, false);
}

void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, true);
}

static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int i, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	mlx5_esw_for_each_host_func_rep(esw, i, rep, esw->esw_funcs.num_vfs) {
		if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push && in_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules w.o it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}

int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
		 !attr->dest_chain);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		return err;

	attr->vlan_handled = false;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
			vport->vlan_refcount++;
			attr->vlan_handled = true;
		}

		return 0;
	}

	if (!push && !pop)
		return 0;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan_vid[0];
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->vlan_handled = true;
	return err;
}

int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	if (!attr->vlan_handled)
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
			vport->vlan_refcount--;

		return 0;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		return 0;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	return err;
}

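/* Add the "send to vport" rule that steers traffic a representor
 * transmits on one of its send queues (matched by source SQN and the
 * eswitch manager source port) straight to the represented vport.
 */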
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, u16 vport,
				    u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	/* source vport is the esw manager */
	MLX5_SET(fte_match_set_misc, misc, source_port, esw->manager_vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

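/* Enable/disable passing of REG_C_0 from the FDB to the vport: both
 * helpers below read fdb_to_vport_reg_c_id from the esw vport context,
 * flip the MLX5_FDB_TO_VPORT_REG_C_0 bit, and write it back.
 */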
static int mlx5_eswitch_enable_passing_vport_metadata(struct mlx5_eswitch *esw)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
	u8 fdb_to_vport_reg_c_id;
	int err;

	err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport,
						   out, sizeof(out));
	if (err)
		return err;

	fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out,
					 esw_vport_context.fdb_to_vport_reg_c_id);

	fdb_to_vport_reg_c_id |= MLX5_FDB_TO_VPORT_REG_C_0;
	MLX5_SET(modify_esw_vport_context_in, in,
		 esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id);

	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.fdb_to_vport_reg_c_id, 1);

	return mlx5_eswitch_modify_esw_vport_context(esw, esw->manager_vport,
						     in, sizeof(in));
}

static int mlx5_eswitch_disable_passing_vport_metadata(struct mlx5_eswitch *esw)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
	u8 fdb_to_vport_reg_c_id;
	int err;

	err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport,
						   out, sizeof(out));
	if (err)
		return err;

	fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out,
					 esw_vport_context.fdb_to_vport_reg_c_id);

	fdb_to_vport_reg_c_id &= ~MLX5_FDB_TO_VPORT_REG_C_0;

	MLX5_SET(modify_esw_vport_context_in, in,
		 esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id);

	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.fdb_to_vport_reg_c_id, 1);

	return mlx5_eswitch_modify_esw_vport_context(esw, esw->manager_vport,
						     in, sizeof(in));
}

static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
				  struct mlx5_core_dev *peer_dev,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_destination *dest)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters_2);
		MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

		MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(peer_dev, vhca_id));

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);
	}

	dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest->vport.num = peer_dev->priv.eswitch->manager_vport;
	dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
	dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}

static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
					       struct mlx5_eswitch *peer_esw,
					       struct mlx5_flow_spec *spec,
					       u16 vport)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(peer_esw,
								   vport));
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);
	}
}

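/* On merged-eswitch (e.g. dual-port) setups, install one miss rule per
 * peer vport (PF, ECPF when present, and each VF) that bounces traffic
 * sourced from the peer eswitch back to the peer's manager vport.
 */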
static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
				       struct mlx5_core_dev *peer_dev)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle **flows;
	struct mlx5_flow_handle *flow;
	struct mlx5_flow_spec *spec;
	/* total vports is the same for both e-switches */
	int nvports = esw->total_vports;
	void *misc;
	int err, i;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	peer_miss_rules_setup(esw, peer_dev, spec, &dest);

	flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
	if (!flows) {
		err = -ENOMEM;
		goto alloc_flows_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
						   spec, MLX5_VPORT_PF);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_pf_flow_err;
		}
		flows[MLX5_VPORT_PF] = flow;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_ecpf_flow_err;
		}
		flows[mlx5_eswitch_ecpf_idx(esw)] = flow;
	}

	mlx5_esw_for_each_vf_vport_num(esw, i, mlx5_core_max_vfs(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw,
						   peer_dev->priv.eswitch,
						   spec, i);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_vf_flow_err;
		}
		flows[i] = flow;
	}

	esw->fdb_table.offloads.peer_miss_rules = flows;

	kvfree(spec);
	return 0;

add_vf_flow_err:
	nvports = --i;
	mlx5_esw_for_each_vf_vport_num_reverse(esw, i, nvports)
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
add_ecpf_flow_err:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);
add_pf_flow_err:
	esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
	kvfree(flows);
alloc_flows_err:
	kvfree(spec);
	return err;
}

static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows;
	int i;

	flows = esw->fdb_table.offloads.peer_miss_rules;

	mlx5_esw_for_each_vf_vport_num_reverse(esw, i,
					       mlx5_core_max_vfs(esw->dev))
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);

	kvfree(flows);
}

static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = esw->manager_vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}

#define ESW_OFFLOADS_NUM_GROUPS  4

/* Firmware currently has 4 pools of 4 sizes that it supports (ESW_POOLS),
 * and a virtual memory region of 16M (ESW_SIZE); this region is duplicated
 * for each flow table pool. We can allocate up to 16M of each pool,
 * and we keep track of how much we used via put/get_sz_to_pool.
 * Firmware doesn't report any of this for now.
 * ESW_POOLS is expected to be sorted from large to small.
 */
#define ESW_SIZE (16 * 1024 * 1024)
const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
				    64 * 1024, 4 * 1024 };

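/* With the sizes above, each pool can hold ESW_SIZE / pool_size tables:
 * 4 tables of 4M entries, 16 of 1M, 256 of 64K and 4096 of 4K.
 */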
static int
get_sz_from_pool(struct mlx5_eswitch *esw)
{
	int sz = 0, i;

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
		if (esw->fdb_table.offloads.fdb_left[i]) {
			--esw->fdb_table.offloads.fdb_left[i];
			sz = ESW_POOLS[i];
			break;
		}
	}

	return sz;
}

static void
put_sz_to_pool(struct mlx5_eswitch *esw, int sz)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
		if (sz >= ESW_POOLS[i]) {
			++esw->fdb_table.offloads.fdb_left[i];
			break;
		}
	}
}

static struct mlx5_flow_table *
create_next_size_table(struct mlx5_eswitch *esw,
		       struct mlx5_flow_namespace *ns,
		       u16 table_prio,
		       int level,
		       u32 flags)
{
	struct mlx5_flow_table *fdb;
	int sz;

	sz = get_sz_from_pool(esw);
	if (!sz)
		return ERR_PTR(-ENOSPC);

	fdb = mlx5_create_auto_grouped_flow_table(ns,
						  table_prio,
						  sz,
						  ESW_OFFLOADS_NUM_GROUPS,
						  level,
						  flags);
	if (IS_ERR(fdb)) {
		esw_warn(esw->dev, "Failed to create FDB Table err %d (table prio: %d, level: %d, size: %d)\n",
			 (int)PTR_ERR(fdb), table_prio, level, sz);
		put_sz_to_pool(esw, sz);
	}

	return fdb;
}

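/* Look up (or lazily create) the flow table for (chain, prio, level)
 * under fdb_prio_lock. A hit bumps the refcount of the requested level
 * and of every lower level, so tables are torn down strictly in
 * reverse order by esw_put_prio_table().
 */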
static struct mlx5_flow_table *
esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *fdb = NULL;
	struct mlx5_flow_namespace *ns;
	int table_prio, l = 0;
	u32 flags = 0;

	if (chain == FDB_SLOW_PATH_CHAIN)
		return esw->fdb_table.offloads.slow_fdb;

	mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);

	fdb = fdb_prio_table(esw, chain, prio, level).fdb;
	if (fdb) {
		/* take ref on earlier levels as well */
		while (level >= 0)
			fdb_prio_table(esw, chain, prio, level--).num_rules++;
		mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
		return fdb;
	}

	ns = mlx5_get_fdb_sub_ns(dev, chain);
	if (!ns) {
		esw_warn(dev, "Failed to get FDB sub namespace\n");
		mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	table_prio = (chain * FDB_MAX_PRIO) + prio - 1;

	/* create earlier levels for correct fs_core lookup when
	 * connecting tables
	 */
	for (l = 0; l <= level; l++) {
		if (fdb_prio_table(esw, chain, prio, l).fdb) {
			fdb_prio_table(esw, chain, prio, l).num_rules++;
			continue;
		}

		fdb = create_next_size_table(esw, ns, table_prio, l, flags);
		if (IS_ERR(fdb)) {
			l--;
			goto err_create_fdb;
		}

		fdb_prio_table(esw, chain, prio, l).fdb = fdb;
		fdb_prio_table(esw, chain, prio, l).num_rules = 1;
	}

	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
	return fdb;

err_create_fdb:
	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
	if (l >= 0)
		esw_put_prio_table(esw, chain, prio, l);

	return fdb;
}

static void
esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
{
	int l;

	if (chain == FDB_SLOW_PATH_CHAIN)
		return;

	mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);

	for (l = level; l >= 0; l--) {
		if (--(fdb_prio_table(esw, chain, prio, l).num_rules) > 0)
			continue;

		put_sz_to_pool(esw, fdb_prio_table(esw, chain, prio, l).fdb->max_fte);
		mlx5_destroy_flow_table(fdb_prio_table(esw, chain, prio, l).fdb);
		fdb_prio_table(esw, chain, prio, l).fdb = NULL;
	}

	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
}

static void esw_destroy_offloads_fast_fdb_tables(struct mlx5_eswitch *esw)
{
	/* If lazy creation isn't supported, deref the fast path tables */
	if (!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)) {
		esw_put_prio_table(esw, 0, 1, 1);
		esw_put_prio_table(esw, 0, 1, 0);
	}
}

#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
					   u32 *flow_group_in)
{
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    flow_group_in,
					    match_criteria);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS_2);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters_2.metadata_reg_c_0);
	} else {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_port);
	}
}

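/* Build the slow path FDB and its flow groups: a send-to-vport group
 * (representor SQs), a peer eswitch miss group, and the unicast and
 * multicast miss group, sized as
 * nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + MLX5_ESW_MISS_FLOWS + total_vports.
 */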
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	u32 *flow_group_in, max_flow_counter;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int table_size, ix, err = 0, i;
	struct mlx5_flow_group *g;
	u32 flags = 0, fdb_max;
	void *match_criteria;
	u8 *dmac;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}

	max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
			    MLX5_CAP_GEN(dev, max_flow_counter_15_0);
	fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);

	esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(2^%d))\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
		  max_flow_counter, ESW_OFFLOADS_NUM_GROUPS,
		  fdb_max);

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++)
		esw->fdb_table.offloads.fdb_left[i] =
			ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;

	table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ +
		MLX5_ESW_MISS_FLOWS + esw->total_vports;

	/* create the slow path fdb with encap set, so further table instances
	 * can be created at run time while VFs are probed if the FW allows that.
	 */
	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	ft_attr.flags = flags;
	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

	/* If lazy creation isn't supported, open the fast path tables now */
	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, multi_fdb_encap) &&
	    esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
		esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Lazy creation of flow tables isn't supported, ignoring priorities\n");
		esw_get_prio_table(esw, 0, 1, 0);
		esw_get_prio_table(esw, 0, 1, 1);
	} else {
		esw_debug(dev, "Lazy creation of flow tables supported, deferring table opening\n");
		esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
	}

	/* create send-to-vport group */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create peer esw miss group */
	memset(flow_group_in, 0, inlen);

	esw_set_flow_group_source_port(esw, flow_group_in);

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					      flow_group_in,
					      match_criteria);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_eswitch_owner_vhca_id);

		MLX5_SET(create_flow_group_in, flow_group_in,
			 source_eswitch_owner_vhca_id_valid, 1);
	}

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + esw->total_vports - 1);
	ix += esw->total_vports;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
		goto peer_miss_err;
	}
	esw->fdb_table.offloads.peer_miss_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + MLX5_ESW_MISS_FLOWS);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	esw->nvports = nvports;
	kvfree(flow_group_in);
	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	esw_destroy_offloads_fast_fdb_tables(esw);
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
ns_err:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.offloads.slow_fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
	esw_destroy_offloads_fast_fdb_tables(esw);
}

static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = nvports + MLX5_ESW_MISS_FLOWS;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}

static int esw_create_vport_rx_group(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int err = 0;

	nvports = nvports + MLX5_ESW_MISS_FLOWS;
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	esw_set_flow_group_source_port(esw, flow_group_in);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

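/* Add a rule in the offloads table that steers traffic received from a
 * given source vport (matched by metadata or source_port, whichever is
 * enabled) to the caller-supplied destination, typically the
 * representor's RX TIR.
 */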
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(esw, vport));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}

static int esw_offloads_start(struct mlx5_eswitch *esw,
			      struct netlink_ext_ack *extack)
{
	int err, err1;

	if (esw->mode != MLX5_ESWITCH_LEGACY &&
	    !mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set offloads mode, SRIOV legacy not enabled");
		return -EINVAL;
	}

	mlx5_eswitch_disable(esw);
	mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs);
	err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed setting eswitch to offloads");
		err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to legacy");
		}
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			NL_SET_ERR_MSG_MOD(extack,
					   "Inline mode is different between vports");
		}
	}
	return err;
}

void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	kfree(esw->offloads.vport_reps);
}

int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	int total_vports = esw->total_vports;
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_eswitch_rep *rep;
	u8 hw_id[ETH_ALEN], rep_type;
	int vport_index;

	esw->offloads.vport_reps = kcalloc(total_vports,
					   sizeof(struct mlx5_eswitch_rep),
					   GFP_KERNEL);
	if (!esw->offloads.vport_reps)
		return -ENOMEM;

	mlx5_query_mac_address(dev, hw_id);

	mlx5_esw_for_all_reps(esw, vport_index, rep) {
		rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport_index);
		rep->vport_index = vport_index;
		ether_addr_copy(rep->hw_id, hw_id);

		for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
			atomic_set(&rep->rep_data[rep_type].state,
				   REP_UNREGISTERED);
	}

	return 0;
}

static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
				      struct mlx5_eswitch_rep *rep, u8 rep_type)
{
	if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
			   REP_LOADED, REP_REGISTERED) == REP_LOADED)
		esw->offloads.rep_ops[rep_type]->unload(rep);
}

static void __unload_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	__esw_offloads_unload_rep(esw, rep, rep_type);
}

static void __unload_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
				   u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int i;

	mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvports)
		__esw_offloads_unload_rep(esw, rep, rep_type);
}

static void esw_offloads_unload_vf_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = NUM_REP_TYPES;

	while (rep_type-- > 0)
		__unload_reps_vf_vport(esw, nvports, rep_type);
}

static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	__unload_reps_vf_vport(esw, esw->esw_funcs.num_vfs, rep_type);

	/* Special vports must be the last to unload. */
	__unload_reps_special_vport(esw, rep_type);
}

static void esw_offloads_unload_all_reps(struct mlx5_eswitch *esw)
{
	u8 rep_type = NUM_REP_TYPES;

	while (rep_type-- > 0)
		__unload_reps_all_vport(esw, rep_type);
}

static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
				   struct mlx5_eswitch_rep *rep, u8 rep_type)
{
	int err = 0;

	if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
			   REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
		err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
		if (err)
			atomic_set(&rep->rep_data[rep_type].state,
				   REP_REGISTERED);
	}

	return err;
}

static int __load_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int err;

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	err = __esw_offloads_load_rep(esw, rep, rep_type);
	if (err)
		return err;

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		err = __esw_offloads_load_rep(esw, rep, rep_type);
		if (err)
			goto err_pf;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
		err = __esw_offloads_load_rep(esw, rep, rep_type);
		if (err)
			goto err_ecpf;
	}

	return 0;

err_ecpf:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

err_pf:
	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	__esw_offloads_unload_rep(esw, rep, rep_type);
	return err;
}

static int __load_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
				u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int err, i;

	mlx5_esw_for_each_vf_rep(esw, i, rep, nvports) {
		err = __esw_offloads_load_rep(esw, rep, rep_type);
		if (err)
			goto err_vf;
	}

	return 0;

err_vf:
	__unload_reps_vf_vport(esw, --i, rep_type);
	return err;
}

static int __load_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	int err;

	/* Special vports must be loaded first, uplink rep creates mdev resource. */
	err = __load_reps_special_vport(esw, rep_type);
	if (err)
		return err;

	err = __load_reps_vf_vport(esw, esw->esw_funcs.num_vfs, rep_type);
	if (err)
		goto err_vfs;

	return 0;

err_vfs:
	__unload_reps_special_vport(esw, rep_type);
	return err;
}

static int esw_offloads_load_vf_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = 0;
	int err;

	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
		err = __load_reps_vf_vport(esw, nvports, rep_type);
		if (err)
			goto err_reps;
	}

	return err;

err_reps:
	while (rep_type-- > 0)
		__unload_reps_vf_vport(esw, nvports, rep_type);
	return err;
}

static int esw_offloads_load_all_reps(struct mlx5_eswitch *esw)
{
	u8 rep_type = 0;
	int err;

	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
		err = __load_reps_all_vport(esw, rep_type);
		if (err)
			goto err_reps;
	}

	return err;

err_reps:
	while (rep_type-- > 0)
		__unload_reps_all_vport(esw, rep_type);
	return err;
}

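/* Devcom pairing: when both eswitches of a merged-eswitch pair reach
 * offloads mode, each side installs peer miss rules pointing at the
 * other; the UNPAIR event tears them down again.
 */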
ac004b83
RD
1626#define ESW_OFFLOADS_DEVCOM_PAIR (0)
1627#define ESW_OFFLOADS_DEVCOM_UNPAIR (1)
1628
1629static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
1630 struct mlx5_eswitch *peer_esw)
1631{
1632 int err;
1633
1634 err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
1635 if (err)
1636 return err;
1637
1638 return 0;
1639}
1640
1641static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
1642{
04de7dda 1643 mlx5e_tc_clean_fdb_peer_flows(esw);
ac004b83
RD
1644 esw_del_fdb_peer_miss_rules(esw);
1645}
1646
1647static int mlx5_esw_offloads_devcom_event(int event,
1648 void *my_data,
1649 void *event_data)
1650{
1651 struct mlx5_eswitch *esw = my_data;
1652 struct mlx5_eswitch *peer_esw = event_data;
1653 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
1654 int err;
1655
1656 switch (event) {
1657 case ESW_OFFLOADS_DEVCOM_PAIR:
a5641cb5
JL
1658 if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
1659 mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
1660 break;
1661
ac004b83
RD
1662 err = mlx5_esw_offloads_pair(esw, peer_esw);
1663 if (err)
1664 goto err_out;
1665
1666 err = mlx5_esw_offloads_pair(peer_esw, esw);
1667 if (err)
1668 goto err_pair;
1669
1670 mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
1671 break;
1672
1673 case ESW_OFFLOADS_DEVCOM_UNPAIR:
1674 if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
1675 break;
1676
1677 mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
1678 mlx5_esw_offloads_unpair(peer_esw);
1679 mlx5_esw_offloads_unpair(esw);
1680 break;
1681 }
1682
1683 return 0;
1684
1685err_pair:
1686 mlx5_esw_offloads_unpair(esw);
1687
1688err_out:
 1689 mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d\n",
1690 event, err);
1691 return err;
1692}
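/*
 * Pairing is symmetric: each eswitch installs peer miss rules that steer
 * misses to the other device, so a failure on the second pair call must
 * unpair the first. Eswitches that disagree on vport match metadata are
 * never paired, as their source-port matching schemes would differ.
 */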
1693
1694static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
1695{
1696 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
1697
04de7dda
RD
1698 INIT_LIST_HEAD(&esw->offloads.peer_flows);
1699 mutex_init(&esw->offloads.peer_mutex);
1700
ac004b83
RD
1701 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
1702 return;
1703
1704 mlx5_devcom_register_component(devcom,
1705 MLX5_DEVCOM_ESW_OFFLOADS,
1706 mlx5_esw_offloads_devcom_event,
1707 esw);
1708
1709 mlx5_devcom_send_event(devcom,
1710 MLX5_DEVCOM_ESW_OFFLOADS,
1711 ESW_OFFLOADS_DEVCOM_PAIR, esw);
1712}
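/*
 * The peer flow list and mutex are set up unconditionally, but devcom
 * registration, and with it the immediate PAIR event that lets whichever
 * eswitch comes up second complete the handshake, is limited to devices
 * with the merged_eswitch capability.
 */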
1713
1714static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
1715{
1716 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
1717
1718 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
1719 return;
1720
1721 mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
1722 ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
1723
1724 mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
1725}
1726
18486737
EB
1727static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
1728 struct mlx5_vport *vport)
1729{
18486737
EB
1730 struct mlx5_flow_act flow_act = {0};
1731 struct mlx5_flow_spec *spec;
1732 int err = 0;
1733
 1734 /* For prio tag mode, there is only 1 FTE:
7445cfb1
JL
1735 * 1) Untagged packets - push prio tag VLAN and modify metadata if
1736 * required, allow
18486737
EB
1737 * Unmatched traffic is allowed by default
1738 */
1739
18486737
EB
1740 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1741 if (!spec) {
1742 err = -ENOMEM;
1743 goto out_no_mem;
1744 }
1745
1746 /* Untagged packets - push prio tag VLAN, allow */
1747 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
1748 MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 0);
1749 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1750 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
1751 MLX5_FLOW_CONTEXT_ACTION_ALLOW;
1752 flow_act.vlan[0].ethtype = ETH_P_8021Q;
1753 flow_act.vlan[0].vid = 0;
1754 flow_act.vlan[0].prio = 0;
7445cfb1
JL
1755
1756 if (vport->ingress.modify_metadata_rule) {
1757 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
1758 flow_act.modify_id = vport->ingress.modify_metadata_id;
1759 }
1760
18486737
EB
1761 vport->ingress.allow_rule =
1762 mlx5_add_flow_rules(vport->ingress.acl, spec,
1763 &flow_act, NULL, 0);
1764 if (IS_ERR(vport->ingress.allow_rule)) {
1765 err = PTR_ERR(vport->ingress.allow_rule);
1766 esw_warn(esw->dev,
1767 "vport[%d] configure ingress untagged allow rule, err(%d)\n",
1768 vport->vport, err);
1769 vport->ingress.allow_rule = NULL;
1770 goto out;
1771 }
1772
1773out:
1774 kvfree(spec);
1775out_no_mem:
1776 if (err)
1777 esw_vport_cleanup_ingress_rules(esw, vport);
1778 return err;
1779}
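/*
 * The single FTE installed above makes prio tag mode transparent on
 * ingress: untagged packets from the VF get a zero-VID prio tag pushed
 * (and REG_C_0 metadata set when metadata matching is active) before
 * they enter the FDB.
 */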
1780
7445cfb1
JL
1781static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
1782 struct mlx5_vport *vport)
1783{
1784 u8 action[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {};
9446d17e 1785 static const struct mlx5_flow_spec spec = {};
7445cfb1 1786 struct mlx5_flow_act flow_act = {};
7445cfb1
JL
1787 int err = 0;
1788
1789 MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
1790 MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
1791 MLX5_SET(set_action_in, action, data,
1792 mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport));
1793
1794 err = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
1795 1, action, &vport->ingress.modify_metadata_id);
1796 if (err) {
1797 esw_warn(esw->dev,
1798 "failed to alloc modify header for vport %d ingress acl (%d)\n",
1799 vport->vport, err);
1800 return err;
1801 }
1802
1803 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
1804 flow_act.modify_id = vport->ingress.modify_metadata_id;
1805 vport->ingress.modify_metadata_rule = mlx5_add_flow_rules(vport->ingress.acl,
1806 &spec, &flow_act, NULL, 0);
1807 if (IS_ERR(vport->ingress.modify_metadata_rule)) {
1808 err = PTR_ERR(vport->ingress.modify_metadata_rule);
1809 esw_warn(esw->dev,
1810 "failed to add setting metadata rule for vport %d ingress acl, err(%d)\n",
1811 vport->vport, err);
1812 vport->ingress.modify_metadata_rule = NULL;
1813 goto out;
1814 }
1815
1816out:
1817 if (err)
1818 mlx5_modify_header_dealloc(esw->dev, vport->ingress.modify_metadata_id);
1819 return err;
1820}
1821
1822void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
1823 struct mlx5_vport *vport)
1824{
1825 if (vport->ingress.modify_metadata_rule) {
1826 mlx5_del_flow_rules(vport->ingress.modify_metadata_rule);
1827 mlx5_modify_header_dealloc(esw->dev, vport->ingress.modify_metadata_id);
1828
1829 vport->ingress.modify_metadata_rule = NULL;
1830 }
1831}
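/*
 * Teardown order matters here: the flow rule that references the
 * modify-header context must be deleted before the context itself is
 * deallocated.
 */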
1832
18486737
EB
1833static int esw_vport_egress_prio_tag_config(struct mlx5_eswitch *esw,
1834 struct mlx5_vport *vport)
1835{
1836 struct mlx5_flow_act flow_act = {0};
1837 struct mlx5_flow_spec *spec;
1838 int err = 0;
1839
7445cfb1
JL
1840 if (!MLX5_CAP_GEN(esw->dev, prio_tag_required))
1841 return 0;
1842
18486737
EB
 1843 /* For prio tag mode, there is only 1 FTE:
1844 * 1) prio tag packets - pop the prio tag VLAN, allow
1845 * Unmatched traffic is allowed by default
1846 */
1847
1848 esw_vport_cleanup_egress_rules(esw, vport);
1849
1850 err = esw_vport_enable_egress_acl(esw, vport);
1851 if (err) {
1852 mlx5_core_warn(esw->dev,
1853 "failed to enable egress acl (%d) on vport[%d]\n",
1854 err, vport->vport);
1855 return err;
1856 }
1857
1858 esw_debug(esw->dev,
1859 "vport[%d] configure prio tag egress rules\n", vport->vport);
1860
1861 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1862 if (!spec) {
1863 err = -ENOMEM;
1864 goto out_no_mem;
1865 }
1866
1867 /* prio tag vlan rule - pop it so VF receives untagged packets */
1868 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
1869 MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
1870 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
1871 MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, 0);
1872
1873 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1874 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
1875 MLX5_FLOW_CONTEXT_ACTION_ALLOW;
1876 vport->egress.allowed_vlan =
1877 mlx5_add_flow_rules(vport->egress.acl, spec,
1878 &flow_act, NULL, 0);
1879 if (IS_ERR(vport->egress.allowed_vlan)) {
1880 err = PTR_ERR(vport->egress.allowed_vlan);
1881 esw_warn(esw->dev,
1882 "vport[%d] configure egress pop prio tag vlan rule failed, err(%d)\n",
1883 vport->vport, err);
1884 vport->egress.allowed_vlan = NULL;
1885 goto out;
1886 }
1887
1888out:
1889 kvfree(spec);
1890out_no_mem:
1891 if (err)
1892 esw_vport_cleanup_egress_rules(esw, vport);
1893 return err;
1894}
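/*
 * This is the egress counterpart of the ingress rule above: packets that
 * still carry the zero-VID prio tag get it popped, so the VF only ever
 * sees untagged traffic. Together the two rules hide prio tag handling
 * from the VF entirely.
 */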
1895
7445cfb1
JL
1896static int esw_vport_ingress_common_config(struct mlx5_eswitch *esw,
1897 struct mlx5_vport *vport)
18486737 1898{
18486737
EB
1899 int err;
1900
7445cfb1
JL
1901 if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
1902 !MLX5_CAP_GEN(esw->dev, prio_tag_required))
1903 return 0;
1904
1905 esw_vport_cleanup_ingress_rules(esw, vport);
1906
1907 err = esw_vport_enable_ingress_acl(esw, vport);
1908 if (err) {
1909 esw_warn(esw->dev,
1910 "failed to enable ingress acl (%d) on vport[%d]\n",
1911 err, vport->vport);
1912 return err;
1913 }
1914
1915 esw_debug(esw->dev,
1916 "vport[%d] configure ingress rules\n", vport->vport);
1917
1918 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
1919 err = esw_vport_add_ingress_acl_modify_metadata(esw, vport);
1920 if (err)
1921 goto out;
1922 }
1923
1924 if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
1925 mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
786ef904 1926 err = esw_vport_ingress_prio_tag_config(esw, vport);
18486737 1927 if (err)
7445cfb1
JL
1928 goto out;
1929 }
1930
1931out:
1932 if (err)
1933 esw_vport_disable_ingress_acl(esw, vport);
1934 return err;
1935}
1936
92ab1eb3
JL
1937static bool
1938esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
1939{
1940 if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
1941 return false;
1942
1943 if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
1944 MLX5_FDB_TO_VPORT_REG_C_0))
1945 return false;
1946
1947 if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
1948 return false;
1949
1950 if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
1951 mlx5_ecpf_vport_exists(esw->dev))
1952 return false;
1953
1954 return true;
1955}
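/*
 * Metadata matching therefore needs three capabilities: an uplink ingress
 * ACL to set the metadata, propagation of REG_C_0 from the FDB to the
 * vport, and flow_source support. It is also kept off on ECPF-based
 * configurations.
 */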
1956
7445cfb1
JL
1957static int esw_create_offloads_acl_tables(struct mlx5_eswitch *esw)
1958{
1959 struct mlx5_vport *vport;
1960 int i, j;
1961 int err;
1962
92ab1eb3
JL
1963 if (esw_check_vport_match_metadata_supported(esw))
1964 esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
1965
7445cfb1
JL
1966 mlx5_esw_for_all_vports(esw, i, vport) {
1967 err = esw_vport_ingress_common_config(esw, vport);
18486737 1968 if (err)
7445cfb1
JL
1969 goto err_ingress;
1970
1971 if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
1972 err = esw_vport_egress_prio_tag_config(esw, vport);
1973 if (err)
1974 goto err_egress;
1975 }
18486737
EB
1976 }
1977
7445cfb1
JL
1978 if (mlx5_eswitch_vport_match_metadata_enabled(esw))
 1979 esw_info(esw->dev, "Using metadata reg_c to match source vport\n");
1980
18486737
EB
1981 return 0;
1982
1983err_egress:
786ef904 1984 esw_vport_disable_ingress_acl(esw, vport);
18486737 1985err_ingress:
7445cfb1
JL
1986 for (j = MLX5_VPORT_PF; j < i; j++) {
1987 vport = &esw->vports[j];
786ef904
PP
1988 esw_vport_disable_egress_acl(esw, vport);
1989 esw_vport_disable_ingress_acl(esw, vport);
18486737
EB
1990 }
1991
1992 return err;
1993}
1994
7445cfb1 1995static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw)
18486737 1996{
786ef904 1997 struct mlx5_vport *vport;
18486737
EB
1998 int i;
1999
7445cfb1 2000 mlx5_esw_for_all_vports(esw, i, vport) {
786ef904
PP
2001 esw_vport_disable_egress_acl(esw, vport);
2002 esw_vport_disable_ingress_acl(esw, vport);
18486737 2003 }
7445cfb1
JL
2004
2005 esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
18486737
EB
2006}
2007
062f4bf4 2008static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
6ed1803a 2009{
062f4bf4
BW
2010 int num_vfs = esw->esw_funcs.num_vfs;
2011 int total_vports;
6ed1803a
MB
2012 int err;
2013
062f4bf4
BW
2014 if (mlx5_core_is_ecpf_esw_manager(esw->dev))
2015 total_vports = esw->total_vports;
2016 else
2017 total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev);
2018
5c1d260e 2019 memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
e52c2802
PB
2020 mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
2021
7445cfb1
JL
2022 err = esw_create_offloads_acl_tables(esw);
2023 if (err)
2024 return err;
18486737 2025
062f4bf4 2026 err = esw_create_offloads_fdb_tables(esw, total_vports);
c930a3ad 2027 if (err)
7445cfb1 2028 goto create_fdb_err;
c930a3ad 2029
062f4bf4 2030 err = esw_create_offloads_table(esw, total_vports);
c930a3ad
OG
2031 if (err)
2032 goto create_ft_err;
2033
062f4bf4 2034 err = esw_create_vport_rx_group(esw, total_vports);
c930a3ad
OG
2035 if (err)
2036 goto create_fg_err;
2037
2038 return 0;
2039
2040create_fg_err:
2041 esw_destroy_offloads_table(esw);
2042
2043create_ft_err:
1967ce6e 2044 esw_destroy_offloads_fdb_tables(esw);
5bae8c03 2045
7445cfb1
JL
2046create_fdb_err:
2047 esw_destroy_offloads_acl_tables(esw);
2048
c930a3ad
OG
2049 return err;
2050}
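/*
 * Steering objects are created in order: the per-vport ACL tables first,
 * then the FDB tables, then the offloads table and its vport RX group;
 * the error labels unwind them in exactly the reverse order.
 */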
2051
eca8cc38
BW
2052static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
2053{
2054 esw_destroy_vport_rx_group(esw);
2055 esw_destroy_offloads_table(esw);
2056 esw_destroy_offloads_fdb_tables(esw);
7445cfb1 2057 esw_destroy_offloads_acl_tables(esw);
eca8cc38
BW
2058}
2059
7e736f9a
PP
2060static void
2061esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
a3888f33 2062{
5ccf2770 2063 bool host_pf_disabled;
7e736f9a 2064 u16 new_num_vfs;
a3888f33 2065
7e736f9a
PP
2066 new_num_vfs = MLX5_GET(query_esw_functions_out, out,
2067 host_params_context.host_num_of_vfs);
5ccf2770
BW
2068 host_pf_disabled = MLX5_GET(query_esw_functions_out, out,
2069 host_params_context.host_pf_disabled);
a3888f33 2070
7e736f9a
PP
2071 if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
2072 return;
a3888f33
BW
2073
2074 /* Number of VFs can only change from "0 to x" or "x to 0". */
cd56f929
VP
2075 if (esw->esw_funcs.num_vfs > 0) {
2076 esw_offloads_unload_vf_reps(esw, esw->esw_funcs.num_vfs);
a3888f33 2077 } else {
7e736f9a 2078 int err;
a3888f33 2079
7e736f9a 2080 err = esw_offloads_load_vf_reps(esw, new_num_vfs);
a3888f33 2081 if (err)
7e736f9a 2082 return;
a3888f33 2083 }
7e736f9a 2084 esw->esw_funcs.num_vfs = new_num_vfs;
a3888f33
BW
2085}
2086
7e736f9a 2087static void esw_functions_changed_event_handler(struct work_struct *work)
ac35dcd6 2088{
7e736f9a
PP
2089 struct mlx5_host_work *host_work;
2090 struct mlx5_eswitch *esw;
dd28087c 2091 const u32 *out;
ac35dcd6 2092
7e736f9a
PP
2093 host_work = container_of(work, struct mlx5_host_work, work);
2094 esw = host_work->esw;
a3888f33 2095
dd28087c
PP
2096 out = mlx5_esw_query_functions(esw->dev);
2097 if (IS_ERR(out))
7e736f9a 2098 goto out;
a3888f33 2099
7e736f9a 2100 esw_vfs_changed_event_handler(esw, out);
dd28087c 2101 kvfree(out);
a3888f33 2102out:
ac35dcd6
VP
2103 kfree(host_work);
2104}
2105
16fff98a 2106int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data)
a3888f33 2107{
cd56f929 2108 struct mlx5_esw_functions *esw_funcs;
a3888f33 2109 struct mlx5_host_work *host_work;
a3888f33
BW
2110 struct mlx5_eswitch *esw;
2111
2112 host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
2113 if (!host_work)
2114 return NOTIFY_DONE;
2115
cd56f929
VP
2116 esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
2117 esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);
a3888f33
BW
2118
2119 host_work->esw = esw;
2120
062f4bf4 2121 INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
a3888f33
BW
2122 queue_work(esw->work_queue, &host_work->work);
2123
2124 return NOTIFY_OK;
2125}
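/*
 * The notifier may be called from atomic context, hence the GFP_ATOMIC
 * allocation; the firmware query and any rep loading happen later, on the
 * eswitch workqueue.
 */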
2126
062f4bf4 2127int esw_offloads_init(struct mlx5_eswitch *esw)
eca8cc38
BW
2128{
2129 int err;
2130
9a64144d
MG
2131 if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
2132 MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
2133 esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
2134 else
2135 esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
2136
062f4bf4 2137 err = esw_offloads_steering_init(esw);
eca8cc38
BW
2138 if (err)
2139 return err;
2140
c1286050
JL
2141 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
2142 err = mlx5_eswitch_enable_passing_vport_metadata(esw);
2143 if (err)
2144 goto err_vport_metadata;
2145 }
2146
062f4bf4 2147 err = esw_offloads_load_all_reps(esw);
eca8cc38
BW
2148 if (err)
2149 goto err_reps;
2150
2151 esw_offloads_devcom_init(esw);
10caabda 2152 mutex_init(&esw->offloads.termtbl_mutex);
a3888f33 2153
80f09dfc
MG
2154 mlx5_rdma_enable_roce(esw->dev);
2155
eca8cc38
BW
2156 return 0;
2157
2158err_reps:
c1286050
JL
2159 if (mlx5_eswitch_vport_match_metadata_enabled(esw))
2160 mlx5_eswitch_disable_passing_vport_metadata(esw);
2161err_vport_metadata:
eca8cc38
BW
2162 esw_offloads_steering_cleanup(esw);
2163 return err;
2164}
2165
db7ff19e
EB
2166static int esw_offloads_stop(struct mlx5_eswitch *esw,
2167 struct netlink_ext_ack *extack)
c930a3ad 2168{
062f4bf4 2169 int err, err1;
c930a3ad 2170
f6455de0 2171 mlx5_eswitch_disable(esw);
062f4bf4 2172 err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
6c419ba8 2173 if (err) {
8c98ee77 2174 NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
062f4bf4 2175 err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
8c98ee77
EB
2176 if (err1) {
2177 NL_SET_ERR_MSG_MOD(extack,
2178 "Failed setting eswitch back to offloads");
2179 }
6c419ba8 2180 }
c930a3ad
OG
2181
2182 return err;
2183}
2184
c9b99abc 2185void esw_offloads_cleanup(struct mlx5_eswitch *esw)
c930a3ad 2186{
80f09dfc 2187 mlx5_rdma_disable_roce(esw->dev);
ac004b83 2188 esw_offloads_devcom_cleanup(esw);
062f4bf4 2189 esw_offloads_unload_all_reps(esw);
c1286050
JL
2190 if (mlx5_eswitch_vport_match_metadata_enabled(esw))
2191 mlx5_eswitch_disable_passing_vport_metadata(esw);
eca8cc38 2192 esw_offloads_steering_cleanup(esw);
9a64144d 2193 esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
c930a3ad
OG
2194}
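/*
 * Cleanup mirrors esw_offloads_init() in reverse: RoCE and devcom are torn
 * down first, then all reps are unloaded, vport metadata passing is
 * disabled, the steering objects are destroyed, and the encap mode is
 * reset to its default.
 */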
2195
ef78618b 2196static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
c930a3ad
OG
2197{
2198 switch (mode) {
2199 case DEVLINK_ESWITCH_MODE_LEGACY:
f6455de0 2200 *mlx5_mode = MLX5_ESWITCH_LEGACY;
c930a3ad
OG
2201 break;
2202 case DEVLINK_ESWITCH_MODE_SWITCHDEV:
f6455de0 2203 *mlx5_mode = MLX5_ESWITCH_OFFLOADS;
c930a3ad
OG
2204 break;
2205 default:
2206 return -EINVAL;
2207 }
2208
2209 return 0;
2210}
2211
ef78618b
OG
2212static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
2213{
2214 switch (mlx5_mode) {
f6455de0 2215 case MLX5_ESWITCH_LEGACY:
ef78618b
OG
2216 *mode = DEVLINK_ESWITCH_MODE_LEGACY;
2217 break;
f6455de0 2218 case MLX5_ESWITCH_OFFLOADS:
ef78618b
OG
2219 *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
2220 break;
2221 default:
2222 return -EINVAL;
2223 }
2224
2225 return 0;
2226}
2227
bffaa916
RD
2228static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
2229{
2230 switch (mode) {
2231 case DEVLINK_ESWITCH_INLINE_MODE_NONE:
2232 *mlx5_mode = MLX5_INLINE_MODE_NONE;
2233 break;
2234 case DEVLINK_ESWITCH_INLINE_MODE_LINK:
2235 *mlx5_mode = MLX5_INLINE_MODE_L2;
2236 break;
2237 case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
2238 *mlx5_mode = MLX5_INLINE_MODE_IP;
2239 break;
2240 case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
2241 *mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
2242 break;
2243 default:
2244 return -EINVAL;
2245 }
2246
2247 return 0;
2248}
2249
2250static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
2251{
2252 switch (mlx5_mode) {
2253 case MLX5_INLINE_MODE_NONE:
2254 *mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
2255 break;
2256 case MLX5_INLINE_MODE_L2:
2257 *mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
2258 break;
2259 case MLX5_INLINE_MODE_IP:
2260 *mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
2261 break;
2262 case MLX5_INLINE_MODE_TCP_UDP:
2263 *mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
2264 break;
2265 default:
2266 return -EINVAL;
2267 }
2268
2269 return 0;
2270}
2271
9d1cef19 2272static int mlx5_devlink_eswitch_check(struct devlink *devlink)
feae9087 2273{
9d1cef19 2274 struct mlx5_core_dev *dev = devlink_priv(devlink);
c930a3ad 2275
9d1cef19
OG
2276 if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
2277 return -EOPNOTSUPP;
c930a3ad 2278
733d3e54
OG
 2279 if (!MLX5_ESWITCH_MANAGER(dev))
2280 return -EPERM;
c930a3ad 2281
f6455de0 2282 if (dev->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
c96692fb 2283 !mlx5_core_is_ecpf_esw_manager(dev))
c930a3ad
OG
2284 return -EOPNOTSUPP;
2285
9d1cef19
OG
2286 return 0;
2287}
2288
db7ff19e
EB
2289int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
2290 struct netlink_ext_ack *extack)
9d1cef19
OG
2291{
2292 struct mlx5_core_dev *dev = devlink_priv(devlink);
2293 u16 cur_mlx5_mode, mlx5_mode = 0;
2294 int err;
2295
2296 err = mlx5_devlink_eswitch_check(devlink);
2297 if (err)
2298 return err;
2299
2300 cur_mlx5_mode = dev->priv.eswitch->mode;
2301
ef78618b 2302 if (esw_mode_from_devlink(mode, &mlx5_mode))
c930a3ad
OG
2303 return -EINVAL;
2304
2305 if (cur_mlx5_mode == mlx5_mode)
2306 return 0;
2307
2308 if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
db7ff19e 2309 return esw_offloads_start(dev->priv.eswitch, extack);
c930a3ad 2310 else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
db7ff19e 2311 return esw_offloads_stop(dev->priv.eswitch, extack);
c930a3ad
OG
2312 else
2313 return -EINVAL;
feae9087
OG
2314}
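/*
 * For reference, this entry point is normally driven from userspace with
 * the devlink tool; the PCI address below is only an example:
 *
 *   devlink dev eswitch set pci/0000:03:00.0 mode switchdev
 */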
2315
2316int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
2317{
9d1cef19
OG
2318 struct mlx5_core_dev *dev = devlink_priv(devlink);
2319 int err;
c930a3ad 2320
9d1cef19
OG
2321 err = mlx5_devlink_eswitch_check(devlink);
2322 if (err)
2323 return err;
c930a3ad 2324
ef78618b 2325 return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
feae9087 2326}
127ea380 2327
db7ff19e
EB
2328int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
2329 struct netlink_ext_ack *extack)
bffaa916
RD
2330{
2331 struct mlx5_core_dev *dev = devlink_priv(devlink);
2332 struct mlx5_eswitch *esw = dev->priv.eswitch;
db68cc56 2333 int err, vport, num_vport;
bffaa916
RD
2334 u8 mlx5_mode;
2335
9d1cef19
OG
2336 err = mlx5_devlink_eswitch_check(devlink);
2337 if (err)
2338 return err;
bffaa916 2339
c415f704
OG
2340 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
2341 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
2342 if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
2343 return 0;
2344 /* fall through */
2345 case MLX5_CAP_INLINE_MODE_L2:
8c98ee77 2346 NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
bffaa916 2347 return -EOPNOTSUPP;
c415f704
OG
2348 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
2349 break;
2350 }
bffaa916 2351
375f51e2 2352 if (esw->offloads.num_flows > 0) {
8c98ee77
EB
2353 NL_SET_ERR_MSG_MOD(extack,
2354 "Can't set inline mode when flows are configured");
375f51e2
RD
2355 return -EOPNOTSUPP;
2356 }
2357
bffaa916
RD
2358 err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
2359 if (err)
2360 goto out;
2361
411ec9e0 2362 mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
bffaa916
RD
2363 err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
2364 if (err) {
8c98ee77
EB
2365 NL_SET_ERR_MSG_MOD(extack,
2366 "Failed to set min inline on vport");
bffaa916
RD
2367 goto revert_inline_mode;
2368 }
2369 }
2370
2371 esw->offloads.inline_mode = mlx5_mode;
2372 return 0;
2373
2374revert_inline_mode:
db68cc56 2375 num_vport = --vport;
411ec9e0 2376 mlx5_esw_for_each_host_func_vport_reverse(esw, vport, num_vport)
bffaa916
RD
2377 mlx5_modify_nic_vport_min_inline(dev,
2378 vport,
2379 esw->offloads.inline_mode);
2380out:
2381 return err;
2382}
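/*
 * Userspace example (device address hypothetical):
 *
 *   devlink dev eswitch set pci/0000:03:00.0 inline-mode transport
 *
 * "transport" reaches this function as DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT
 * and is mapped to MLX5_INLINE_MODE_TCP_UDP by esw_inline_mode_from_devlink().
 */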
2383
2384int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
2385{
2386 struct mlx5_core_dev *dev = devlink_priv(devlink);
2387 struct mlx5_eswitch *esw = dev->priv.eswitch;
9d1cef19 2388 int err;
bffaa916 2389
9d1cef19
OG
2390 err = mlx5_devlink_eswitch_check(devlink);
2391 if (err)
2392 return err;
bffaa916 2393
bffaa916
RD
2394 return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
2395}
2396
062f4bf4 2397int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
bffaa916 2398{
c415f704 2399 u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
bffaa916
RD
2400 struct mlx5_core_dev *dev = esw->dev;
2401 int vport;
bffaa916
RD
2402
2403 if (!MLX5_CAP_GEN(dev, vport_group_manager))
2404 return -EOPNOTSUPP;
2405
f6455de0 2406 if (esw->mode == MLX5_ESWITCH_NONE)
bffaa916
RD
2407 return -EOPNOTSUPP;
2408
c415f704
OG
2409 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
2410 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
2411 mlx5_mode = MLX5_INLINE_MODE_NONE;
2412 goto out;
2413 case MLX5_CAP_INLINE_MODE_L2:
2414 mlx5_mode = MLX5_INLINE_MODE_L2;
2415 goto out;
2416 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
2417 goto query_vports;
2418 }
bffaa916 2419
c415f704 2420query_vports:
411ec9e0
BW
2421 mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
2422 mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
bffaa916 2423 mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
411ec9e0 2424 if (prev_mlx5_mode != mlx5_mode)
bffaa916
RD
2425 return -EINVAL;
2426 prev_mlx5_mode = mlx5_mode;
2427 }
2428
c415f704 2429out:
bffaa916
RD
2430 *mode = mlx5_mode;
2431 return 0;
2432}
2433
98fdbea5
LR
2434int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
2435 enum devlink_eswitch_encap_mode encap,
db7ff19e 2436 struct netlink_ext_ack *extack)
7768d197
RD
2437{
2438 struct mlx5_core_dev *dev = devlink_priv(devlink);
2439 struct mlx5_eswitch *esw = dev->priv.eswitch;
2440 int err;
2441
9d1cef19
OG
2442 err = mlx5_devlink_eswitch_check(devlink);
2443 if (err)
2444 return err;
7768d197
RD
2445
2446 if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
60786f09 2447 (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
7768d197
RD
2448 !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
2449 return -EOPNOTSUPP;
2450
2451 if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
2452 return -EOPNOTSUPP;
2453
f6455de0 2454 if (esw->mode == MLX5_ESWITCH_LEGACY) {
7768d197
RD
2455 esw->offloads.encap = encap;
2456 return 0;
2457 }
2458
2459 if (esw->offloads.encap == encap)
2460 return 0;
2461
2462 if (esw->offloads.num_flows > 0) {
8c98ee77
EB
2463 NL_SET_ERR_MSG_MOD(extack,
2464 "Can't set encapsulation when flows are configured");
7768d197
RD
2465 return -EOPNOTSUPP;
2466 }
2467
e52c2802 2468 esw_destroy_offloads_fdb_tables(esw);
7768d197
RD
2469
2470 esw->offloads.encap = encap;
e52c2802
PB
2471
2472 err = esw_create_offloads_fdb_tables(esw, esw->nvports);
2473
7768d197 2474 if (err) {
8c98ee77
EB
2475 NL_SET_ERR_MSG_MOD(extack,
2476 "Failed re-creating fast FDB table");
7768d197 2477 esw->offloads.encap = !encap;
e52c2802 2478 (void)esw_create_offloads_fdb_tables(esw, esw->nvports);
7768d197 2479 }
e52c2802 2480
7768d197
RD
2481 return err;
2482}
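/*
 * Userspace example (device address hypothetical; the exact keyword
 * depends on the devlink version):
 *
 *   devlink dev eswitch set pci/0000:03:00.0 encap enable
 */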
2483
98fdbea5
LR
2484int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
2485 enum devlink_eswitch_encap_mode *encap)
7768d197
RD
2486{
2487 struct mlx5_core_dev *dev = devlink_priv(devlink);
2488 struct mlx5_eswitch *esw = dev->priv.eswitch;
9d1cef19 2489 int err;
7768d197 2490
9d1cef19
OG
2491 err = mlx5_devlink_eswitch_check(devlink);
2492 if (err)
2493 return err;
7768d197
RD
2494
2495 *encap = esw->offloads.encap;
2496 return 0;
2497}
2498
f8e8fa02 2499void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
8693115a 2500 const struct mlx5_eswitch_rep_ops *ops,
f8e8fa02 2501 u8 rep_type)
127ea380 2502{
8693115a 2503 struct mlx5_eswitch_rep_data *rep_data;
f8e8fa02
BW
2504 struct mlx5_eswitch_rep *rep;
2505 int i;
9deb2241 2506
8693115a 2507 esw->offloads.rep_ops[rep_type] = ops;
f8e8fa02 2508 mlx5_esw_for_all_reps(esw, i, rep) {
8693115a
PP
2509 rep_data = &rep->rep_data[rep_type];
2510 atomic_set(&rep_data->state, REP_REGISTERED);
f8e8fa02 2511 }
127ea380 2512}
f8e8fa02 2513EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
127ea380 2514
f8e8fa02 2515void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
127ea380 2516{
cb67b832 2517 struct mlx5_eswitch_rep *rep;
f8e8fa02 2518 int i;
cb67b832 2519
f6455de0 2520 if (esw->mode == MLX5_ESWITCH_OFFLOADS)
062f4bf4 2521 __unload_reps_all_vport(esw, rep_type);
127ea380 2522
f8e8fa02 2523 mlx5_esw_for_all_reps(esw, i, rep)
8693115a 2524 atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
127ea380 2525}
f8e8fa02 2526EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
726293f1 2527
a4b97ab4 2528void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
726293f1 2529{
726293f1
HHZ
2530 struct mlx5_eswitch_rep *rep;
2531
879c8f84 2532 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
8693115a 2533 return rep->rep_data[rep_type].priv;
726293f1 2534}
22215908
MB
2535
2536void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
02f3afd9 2537 u16 vport,
22215908
MB
2538 u8 rep_type)
2539{
22215908
MB
2540 struct mlx5_eswitch_rep *rep;
2541
879c8f84 2542 rep = mlx5_eswitch_get_rep(esw, vport);
22215908 2543
8693115a
PP
2544 if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
2545 esw->offloads.rep_ops[rep_type]->get_proto_dev)
2546 return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
22215908
MB
2547 return NULL;
2548}
57cbd893 2549EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);
22215908
MB
2550
2551void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
2552{
879c8f84 2553 return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
22215908 2554}
57cbd893
MB
2555EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);
2556
2557struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
02f3afd9 2558 u16 vport)
57cbd893 2559{
879c8f84 2560 return mlx5_eswitch_get_rep(esw, vport);
57cbd893
MB
2561}
2562EXPORT_SYMBOL(mlx5_eswitch_vport_rep);
91d6291c
PP
2563
2564bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
2565{
2566 return vport_num >= MLX5_VPORT_FIRST_VF &&
2567 vport_num <= esw->dev->priv.sriov.max_vfs;
2568}
7445cfb1
JL
2569
2570bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
2571{
2572 return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
2573}
2574EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);
2575
2576u32 mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw,
2577 u16 vport_num)
2578{
2579 return ((MLX5_CAP_GEN(esw->dev, vhca_id) & 0xffff) << 16) | vport_num;
2580}
2581EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
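/*
 * Worked example with hypothetical values: for vhca_id 0x25 and vport_num 2,
 * the match value is (0x25 << 16) | 2 = 0x00250002. This is the same value
 * the ingress ACL modify-header rule writes into REG_C_0, so FDB rules can
 * match on it instead of on the source vport number.
 */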