/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
#include "lib/eq.h"

/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)

#define fdb_prio_table(esw, chain, prio, level) \
	(esw)->fdb_table.offloads.fdb_prio[(chain)][(prio)][(level)]

#define UPLINK_REP_INDEX 0

static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
						     u16 vport_num)
{
	int idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);

	WARN_ON(idx > esw->total_vports - 1);
	return &esw->offloads.vport_reps[idx];
}

static struct mlx5_flow_table *
esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
static void
esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);

bool mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw)
{
	return !!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED);
}

u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw)
{
	if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
		return FDB_TC_MAX_CHAIN;

	return 0;
}

u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw)
{
	if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
		return FDB_TC_MAX_PRIO;

	return 1;
}

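/* Set the source vport match on a rule: metadata register C0 when vport
 * match metadata is enabled, otherwise the source_port misc field (plus
 * source_eswitch_owner_vhca_id on merged eswitches).
 */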
static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_esw_flow_attr *attr)
{
	void *misc2;
	void *misc;

	/* Use metadata matching because vport is not represented by single
	 * VHCA in dual-port RoCE mode, and matching on source vport may fail.
	 */
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(attr->in_mdev->priv.eswitch,
								   attr->in_rep->vport));

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET_TO_ONES(fte_match_set_misc2, misc2, metadata_reg_c_0);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		if (memchr_inv(misc, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc)))
			spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id,
				 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET_TO_ONES(fte_match_set_misc, misc,
					 source_eswitch_owner_vhca_id);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	}

	if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
	    attr->in_rep->vport == MLX5_VPORT_UPLINK)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
}

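/* Add an offloaded rule to the FDB fast path: build the destination list
 * (goto-chain table, vport destinations or a flow counter), apply the
 * emulated vlan push/pop actions, set the source port match and install
 * the rule in the (chain, prio) table taken via esw_get_prio_table().
 */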
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	bool split = !!(attr->split_count);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *fdb;
	int j, i = 0;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	flow_act.action = attr->action;
	/* if per flow vlan pop/push is emulated, don't set that into the firmware */
	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
				     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
		flow_act.vlan[0].vid = attr->vlan_vid[0];
		flow_act.vlan[0].prio = attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
			flow_act.vlan[1].vid = attr->vlan_vid[1];
			flow_act.vlan[1].prio = attr->vlan_prio[1];
		}
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		if (attr->dest_chain) {
			struct mlx5_flow_table *ft;

			ft = esw_get_prio_table(esw, attr->dest_chain, 1, 0);
			if (IS_ERR(ft)) {
				rule = ERR_CAST(ft);
				goto err_create_goto_table;
			}

			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[i].ft = ft;
			i++;
		} else {
			for (j = attr->split_count; j < attr->out_count; j++) {
				dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
				dest[i].vport.num = attr->dests[j].rep->vport;
				dest[i].vport.vhca_id =
					MLX5_CAP_GEN(attr->dests[j].mdev, vhca_id);
				if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
					dest[i].vport.flags |=
						MLX5_FLOW_DEST_VPORT_VHCA_ID;
				if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
					flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
					flow_act.pkt_reformat = attr->dests[j].pkt_reformat;
					dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
					dest[i].vport.pkt_reformat =
						attr->dests[j].pkt_reformat;
				}
				i++;
			}
		}
	}
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter_id = mlx5_fc_id(attr->counter);
		i++;
	}

	mlx5_eswitch_set_rule_source_port(esw, spec, attr);

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	if (attr->inner_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

	fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!split);
	if (IS_ERR(fdb)) {
		rule = ERR_CAST(fdb);
		goto err_esw_get;
	}

	if (mlx5_eswitch_termtbl_required(esw, &flow_act, spec))
		rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr,
						     &flow_act, dest, i);
	else
		rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		atomic64_inc(&esw->offloads.num_flows);

	return rule;

err_add_rule:
	esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
err_esw_get:
	if (attr->dest_chain)
		esw_put_prio_table(esw, attr->dest_chain, 1, 0);
err_create_goto_table:
	return rule;
}

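/* Add the forward (first) part of a split rule: match in the level-0
 * table and forward to the pre-split vport destinations plus the level-1
 * table of the same (chain, prio), where the rest of the actions apply.
 */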
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *fast_fdb;
	struct mlx5_flow_table *fwd_fdb;
	struct mlx5_flow_handle *rule;
	int i;

	fast_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 0);
	if (IS_ERR(fast_fdb)) {
		rule = ERR_CAST(fast_fdb);
		goto err_get_fast;
	}

	fwd_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 1);
	if (IS_ERR(fwd_fdb)) {
		rule = ERR_CAST(fwd_fdb);
		goto err_get_fwd;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < attr->split_count; i++) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport.num = attr->dests[i].rep->vport;
		dest[i].vport.vhca_id =
			MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
		if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
			dest[i].vport.pkt_reformat = attr->dests[i].pkt_reformat;
		}
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = fwd_fdb;
	i++;

	mlx5_eswitch_set_rule_source_port(esw, spec, attr);

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);

	if (IS_ERR(rule))
		goto add_err;

	atomic64_inc(&esw->offloads.num_flows);

	return rule;
add_err:
	esw_put_prio_table(esw, attr->chain, attr->prio, 1);
err_get_fwd:
	esw_put_prio_table(esw, attr->chain, attr->prio, 0);
err_get_fast:
	return rule;
}

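/* Tear down a rule added by one of the helpers above: delete the flow,
 * unreference any termination tables and drop the prio table references
 * taken at add time.
 */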
static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
			struct mlx5_flow_handle *rule,
			struct mlx5_esw_flow_attr *attr,
			bool fwd_rule)
{
	bool split = (attr->split_count > 0);
	int i;

	mlx5_del_flow_rules(rule);

	/* unref the term table */
	for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
		if (attr->dests[i].termtbl)
			mlx5_eswitch_termtbl_put(esw, attr->dests[i].termtbl);
	}

	atomic64_dec(&esw->offloads.num_flows);

	if (fwd_rule) {
		esw_put_prio_table(esw, attr->chain, attr->prio, 1);
		esw_put_prio_table(esw, attr->chain, attr->prio, 0);
	} else {
		esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
		if (attr->dest_chain)
			esw_put_prio_table(esw, attr->dest_chain, 1, 0);
	}
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, false);
}

void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, true);
}

static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int i, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	mlx5_esw_for_each_host_func_rep(esw, i, rep, esw->esw_funcs.num_vfs) {
		if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

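/* Validate that an emulated vlan push/pop action combination can be
 * offloaded for the given in/out representors.
 */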
static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push && in_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules without it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules without vlans (attr->vlan = 0) && with vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}

int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're in the vlan push/pop non-emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
		 !attr->dest_chain);

	mutex_lock(&esw->state_lock);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		goto unlock;

	attr->vlan_handled = false;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
			vport->vlan_refcount++;
			attr->vlan_handled = true;
		}

		goto unlock;
	}

	if (!push && !pop)
		goto unlock;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan_vid[0];
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->vlan_handled = true;
unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're in the vlan push/pop non-emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	if (!attr->vlan_handled)
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	mutex_lock(&esw->state_lock);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
			vport->vlan_refcount--;

		goto out;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		goto out;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	mutex_unlock(&esw->state_lock);
	return err;
}

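/* Install a rule in the slow path FDB that steers traffic sent by the
 * eswitch manager on a given SQ to the vport's representor.
 *
 * Example (illustrative only, not a call site in this file): a
 * representor driver would typically install one rule per send queue:
 *
 *	flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw, vport, sqn);
 *	if (IS_ERR(flow_rule))
 *		return PTR_ERR(flow_rule);
 */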
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, u16 vport,
				    u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	/* source vport is the esw manager */
	MLX5_SET(fte_match_set_misc, misc, source_port, esw->manager_vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

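/* Enable or disable copying of metadata register C0 from the FDB to
 * vport RX steering, so that rules such as those added by
 * mlx5_eswitch_create_vport_rx_rule() can match on it.
 */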
static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
	u8 fdb_to_vport_reg_c_id;
	int err;

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
		return 0;

	err = mlx5_eswitch_query_esw_vport_context(esw->dev, 0, false,
						   out, sizeof(out));
	if (err)
		return err;

	fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out,
					 esw_vport_context.fdb_to_vport_reg_c_id);

	if (enable)
		fdb_to_vport_reg_c_id |= MLX5_FDB_TO_VPORT_REG_C_0;
	else
		fdb_to_vport_reg_c_id &= ~MLX5_FDB_TO_VPORT_REG_C_0;

	MLX5_SET(modify_esw_vport_context_in, in,
		 esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id);

	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.fdb_to_vport_reg_c_id, 1);

	return mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false,
						     in, sizeof(in));
}

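/* Build the match spec and destination shared by all peer miss rules:
 * match on the peer eswitch's source (metadata or source_port plus
 * owner vhca_id) and forward to the peer's eswitch manager vport.
 */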
static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
				  struct mlx5_core_dev *peer_dev,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_destination *dest)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters_2);
		MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

		MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(peer_dev, vhca_id));

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);
	}

	dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest->vport.num = peer_dev->priv.eswitch->manager_vport;
	dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
	dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}

static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
					       struct mlx5_eswitch *peer_esw,
					       struct mlx5_flow_spec *spec,
					       u16 vport)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(peer_esw,
								   vport));
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);
	}
}

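/* For each host function (PF/ECPF/VFs) of the peer eswitch, add a slow
 * path rule that forwards traffic sourced from that function back to the
 * peer's eswitch manager; used when the two eswitches are paired.
 */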
static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
				       struct mlx5_core_dev *peer_dev)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle **flows;
	struct mlx5_flow_handle *flow;
	struct mlx5_flow_spec *spec;
	/* total vports is the same for both e-switches */
	int nvports = esw->total_vports;
	void *misc;
	int err, i;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	peer_miss_rules_setup(esw, peer_dev, spec, &dest);

	flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
	if (!flows) {
		err = -ENOMEM;
		goto alloc_flows_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
						   spec, MLX5_VPORT_PF);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_pf_flow_err;
		}
		flows[MLX5_VPORT_PF] = flow;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_ecpf_flow_err;
		}
		flows[mlx5_eswitch_ecpf_idx(esw)] = flow;
	}

	mlx5_esw_for_each_vf_vport_num(esw, i, mlx5_core_max_vfs(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw,
						   peer_dev->priv.eswitch,
						   spec, i);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_vf_flow_err;
		}
		flows[i] = flow;
	}

	esw->fdb_table.offloads.peer_miss_rules = flows;

	kvfree(spec);
	return 0;

add_vf_flow_err:
	nvports = --i;
	mlx5_esw_for_each_vf_vport_num_reverse(esw, i, nvports)
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
add_ecpf_flow_err:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);
add_pf_flow_err:
	esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
	kvfree(flows);
alloc_flows_err:
	kvfree(spec);
	return err;
}

static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows;
	int i;

	flows = esw->fdb_table.offloads.peer_miss_rules;

	mlx5_esw_for_each_vf_vport_num_reverse(esw, i,
					       mlx5_core_max_vfs(esw->dev))
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);

	kvfree(flows);
}

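/* Add the two match-all miss flows (unicast and multicast dmac) that
 * forward unmatched slow path traffic to the eswitch manager vport.
 */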
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = esw->manager_vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}

#define ESW_OFFLOADS_NUM_GROUPS  4

/* Firmware currently has 4 pools of 4 sizes that it supports (ESW_POOLS),
 * and a virtual memory region of 16M (ESW_SIZE), this region is duplicated
 * for each flow table pool. We can allocate up to 16M of each pool,
 * and we keep track of how much we used via put/get_sz_to_pool.
 * Firmware doesn't report any of this for now.
 * ESW_POOLS is expected to be sorted from large to small.
 */
#define ESW_SIZE (16 * 1024 * 1024)
const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
				    64 * 1024, 4 * 1024 };

static int
get_sz_from_pool(struct mlx5_eswitch *esw)
{
	int sz = 0, i;

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
		if (esw->fdb_table.offloads.fdb_left[i]) {
			--esw->fdb_table.offloads.fdb_left[i];
			sz = ESW_POOLS[i];
			break;
		}
	}

	return sz;
}

static void
put_sz_to_pool(struct mlx5_eswitch *esw, int sz)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
		if (sz >= ESW_POOLS[i]) {
			++esw->fdb_table.offloads.fdb_left[i];
			break;
		}
	}
}

static struct mlx5_flow_table *
create_next_size_table(struct mlx5_eswitch *esw,
		       struct mlx5_flow_namespace *ns,
		       u16 table_prio,
		       int level,
		       u32 flags)
{
	struct mlx5_flow_table *fdb;
	int sz;

	sz = get_sz_from_pool(esw);
	if (!sz)
		return ERR_PTR(-ENOSPC);

	fdb = mlx5_create_auto_grouped_flow_table(ns,
						  table_prio,
						  sz,
						  ESW_OFFLOADS_NUM_GROUPS,
						  level,
						  flags);
	if (IS_ERR(fdb)) {
		esw_warn(esw->dev, "Failed to create FDB Table err %d (table prio: %d, level: %d, size: %d)\n",
			 (int)PTR_ERR(fdb), table_prio, level, sz);
		put_sz_to_pool(esw, sz);
	}

	return fdb;
}

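/* Get (and reference) the FDB table backing (chain, prio, level), lazily
 * creating it and all earlier levels if needed. Every successful get must
 * be balanced with esw_put_prio_table(), e.g. (illustrative only):
 *
 *	fdb = esw_get_prio_table(esw, chain, prio, 0);
 *	if (IS_ERR(fdb))
 *		return PTR_ERR(fdb);
 *	...
 *	esw_put_prio_table(esw, chain, prio, 0);
 */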
static struct mlx5_flow_table *
esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *fdb = NULL;
	struct mlx5_flow_namespace *ns;
	int table_prio, l = 0;
	u32 flags = 0;

	if (chain == FDB_TC_SLOW_PATH_CHAIN)
		return esw->fdb_table.offloads.slow_fdb;

	mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);

	fdb = fdb_prio_table(esw, chain, prio, level).fdb;
	if (fdb) {
		/* take ref on earlier levels as well */
		while (level >= 0)
			fdb_prio_table(esw, chain, prio, level--).num_rules++;
		mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
		return fdb;
	}

	ns = mlx5_get_fdb_sub_ns(dev, chain);
	if (!ns) {
		esw_warn(dev, "Failed to get FDB sub namespace\n");
		mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	table_prio = prio - 1;

	/* create earlier levels for correct fs_core lookup when
	 * connecting tables
	 */
	for (l = 0; l <= level; l++) {
		if (fdb_prio_table(esw, chain, prio, l).fdb) {
			fdb_prio_table(esw, chain, prio, l).num_rules++;
			continue;
		}

		fdb = create_next_size_table(esw, ns, table_prio, l, flags);
		if (IS_ERR(fdb)) {
			l--;
			goto err_create_fdb;
		}

		fdb_prio_table(esw, chain, prio, l).fdb = fdb;
		fdb_prio_table(esw, chain, prio, l).num_rules = 1;
	}

	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
	return fdb;

err_create_fdb:
	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
	if (l >= 0)
		esw_put_prio_table(esw, chain, prio, l);

	return fdb;
}

static void
esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
{
	int l;

	if (chain == FDB_TC_SLOW_PATH_CHAIN)
		return;

	mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);

	for (l = level; l >= 0; l--) {
		if (--(fdb_prio_table(esw, chain, prio, l).num_rules) > 0)
			continue;

		put_sz_to_pool(esw, fdb_prio_table(esw, chain, prio, l).fdb->max_fte);
		mlx5_destroy_flow_table(fdb_prio_table(esw, chain, prio, l).fdb);
		fdb_prio_table(esw, chain, prio, l).fdb = NULL;
	}

	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
}

static void esw_destroy_offloads_fast_fdb_tables(struct mlx5_eswitch *esw)
{
	/* If lazy creation isn't supported, deref the fast path tables */
	if (!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)) {
		esw_put_prio_table(esw, 0, 1, 1);
		esw_put_prio_table(esw, 0, 1, 0);
	}
}

#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
					   u32 *flow_group_in)
{
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    flow_group_in,
					    match_criteria);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS_2);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters_2.metadata_reg_c_0);
	} else {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_port);
	}
}

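/* Create the offloads mode FDB: the slow path table with its
 * send-to-vport, peer miss and miss flow groups, plus the miss rules;
 * when lazy table creation isn't supported by the FW, the fast path
 * tables are opened here as well.
 */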
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	u32 *flow_group_in, max_flow_counter;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int table_size, ix, err = 0, i;
	struct mlx5_flow_group *g;
	u32 flags = 0, fdb_max;
	void *match_criteria;
	u8 *dmac;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}
	esw->fdb_table.offloads.ns = root_ns;
	err = mlx5_flow_namespace_set_mode(root_ns,
					   esw->dev->priv.steering->mode);
	if (err) {
		esw_warn(dev, "Failed to set FDB namespace steering mode\n");
		goto ns_err;
	}

	max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
			    MLX5_CAP_GEN(dev, max_flow_counter_15_0);
	fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);

	esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(%d))\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
		  max_flow_counter, ESW_OFFLOADS_NUM_GROUPS,
		  fdb_max);

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++)
		esw->fdb_table.offloads.fdb_left[i] =
			ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;

	table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ +
		MLX5_ESW_MISS_FLOWS + esw->total_vports;

	/* create the slow path fdb with encap set, so further table instances
	 * can be created at run time while VFs are probed if the FW allows that.
	 */
	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	ft_attr.flags = flags;
	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

	/* If lazy creation isn't supported, open the fast path tables now */
	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, multi_fdb_encap) &&
	    esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
		esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Lazy creation of flow tables isn't supported, ignoring priorities\n");
		esw_get_prio_table(esw, 0, 1, 0);
		esw_get_prio_table(esw, 0, 1, 1);
	} else {
		esw_debug(dev, "Lazy creation of flow tables supported, deferring table opening\n");
		esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
	}

	/* create send-to-vport group */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create peer esw miss group */
	memset(flow_group_in, 0, inlen);

	esw_set_flow_group_source_port(esw, flow_group_in);

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					      flow_group_in,
					      match_criteria);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_eswitch_owner_vhca_id);

		MLX5_SET(create_flow_group_in, flow_group_in,
			 source_eswitch_owner_vhca_id_valid, 1);
	}

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + esw->total_vports - 1);
	ix += esw->total_vports;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
		goto peer_miss_err;
	}
	esw->fdb_table.offloads.peer_miss_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + MLX5_ESW_MISS_FLOWS);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	esw->nvports = nvports;
	kvfree(flow_group_in);
	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	esw_destroy_offloads_fast_fdb_tables(esw);
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS);
ns_err:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.offloads.slow_fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
	esw_destroy_offloads_fast_fdb_tables(esw);
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
				     MLX5_FLOW_STEERING_MODE_DMFS);
}

static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = nvports + MLX5_ESW_MISS_FLOWS;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}

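/* Create the flow group for the per-vport RX rules, sized to cover all
 * vports plus the miss flows.
 */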
static int esw_create_vport_rx_group(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int err = 0;

	nvports = nvports + MLX5_ESW_MISS_FLOWS;
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	esw_set_flow_group_source_port(esw, flow_group_in);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

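/* Steer traffic received from @vport to @dest (typically the
 * representor's TIR). The source match mirrors
 * mlx5_eswitch_set_rule_source_port(): metadata register C0 when vport
 * match metadata is enabled, source_port otherwise.
 */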
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(esw, vport));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}

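/* devlink callback path: switch the eswitch from legacy to offloads
 * mode, falling back to legacy if enabling offloads fails.
 */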
static int esw_offloads_start(struct mlx5_eswitch *esw,
			      struct netlink_ext_ack *extack)
{
	int err, err1;

	if (esw->mode != MLX5_ESWITCH_LEGACY &&
	    !mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set offloads mode, SRIOV legacy not enabled");
		return -EINVAL;
	}

	mlx5_eswitch_disable(esw, false);
	mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs);
	err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed setting eswitch to offloads");
		err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to legacy");
		}
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			NL_SET_ERR_MSG_MOD(extack,
					   "Inline mode is different between vports");
		}
	}
	return err;
}

void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	kfree(esw->offloads.vport_reps);
}

int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	int total_vports = esw->total_vports;
	struct mlx5_eswitch_rep *rep;
	int vport_index;
	u8 rep_type;

	esw->offloads.vport_reps = kcalloc(total_vports,
					   sizeof(struct mlx5_eswitch_rep),
					   GFP_KERNEL);
	if (!esw->offloads.vport_reps)
		return -ENOMEM;

	mlx5_esw_for_all_reps(esw, vport_index, rep) {
		rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport_index);
		rep->vport_index = vport_index;

		for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
			atomic_set(&rep->rep_data[rep_type].state,
				   REP_UNREGISTERED);
	}

	return 0;
}

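/* Representor (un)loading helpers. Reps move between REP_REGISTERED and
 * REP_LOADED via atomic_cmpxchg(), so each rep is loaded or unloaded at
 * most once per rep type.
 */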
static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
				      struct mlx5_eswitch_rep *rep, u8 rep_type)
{
	if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
			   REP_LOADED, REP_REGISTERED) == REP_LOADED)
		esw->offloads.rep_ops[rep_type]->unload(rep);
}

static void __unload_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	__esw_offloads_unload_rep(esw, rep, rep_type);
}

static void __unload_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
				   u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int i;

	mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvports)
		__esw_offloads_unload_rep(esw, rep, rep_type);
}

static void esw_offloads_unload_vf_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = NUM_REP_TYPES;

	while (rep_type-- > 0)
		__unload_reps_vf_vport(esw, nvports, rep_type);
}

static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	__unload_reps_vf_vport(esw, esw->esw_funcs.num_vfs, rep_type);

	/* Special vports must be the last to unload. */
	__unload_reps_special_vport(esw, rep_type);
}

static void esw_offloads_unload_all_reps(struct mlx5_eswitch *esw)
{
	u8 rep_type = NUM_REP_TYPES;

	while (rep_type-- > 0)
		__unload_reps_all_vport(esw, rep_type);
}

static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
				   struct mlx5_eswitch_rep *rep, u8 rep_type)
{
	int err = 0;

	if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
			   REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
		err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
		if (err)
			atomic_set(&rep->rep_data[rep_type].state,
				   REP_REGISTERED);
	}

	return err;
}

static int __load_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int err;

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	err = __esw_offloads_load_rep(esw, rep, rep_type);
	if (err)
		return err;

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		err = __esw_offloads_load_rep(esw, rep, rep_type);
		if (err)
			goto err_pf;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
		err = __esw_offloads_load_rep(esw, rep, rep_type);
		if (err)
			goto err_ecpf;
	}

	return 0;

err_ecpf:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

err_pf:
	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	__esw_offloads_unload_rep(esw, rep, rep_type);
	return err;
}

static int __load_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
				u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int err, i;

	mlx5_esw_for_each_vf_rep(esw, i, rep, nvports) {
		err = __esw_offloads_load_rep(esw, rep, rep_type);
		if (err)
			goto err_vf;
	}

	return 0;

err_vf:
	__unload_reps_vf_vport(esw, --i, rep_type);
	return err;
}

static int __load_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	int err;

	/* Special vports must be loaded first, uplink rep creates mdev resource. */
	err = __load_reps_special_vport(esw, rep_type);
	if (err)
		return err;

	err = __load_reps_vf_vport(esw, esw->esw_funcs.num_vfs, rep_type);
	if (err)
		goto err_vfs;

	return 0;

err_vfs:
	__unload_reps_special_vport(esw, rep_type);
	return err;
}

static int esw_offloads_load_vf_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = 0;
	int err;

	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
		err = __load_reps_vf_vport(esw, nvports, rep_type);
		if (err)
			goto err_reps;
	}

	return err;

err_reps:
	while (rep_type-- > 0)
		__unload_reps_vf_vport(esw, nvports, rep_type);
	return err;
}

static int esw_offloads_load_all_reps(struct mlx5_eswitch *esw)
{
	u8 rep_type = 0;
	int err;

	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
		err = __load_reps_all_vport(esw, rep_type);
		if (err)
			goto err_reps;
	}

	return err;

err_reps:
	while (rep_type-- > 0)
		__unload_reps_all_vport(esw, rep_type);
	return err;
}

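/* Pairing of the two eswitches of a merged-eswitch (dual-port) device is
 * driven by devcom events; pairing installs the peer miss rules in both
 * directions.
 */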
ac004b83
RD
1616#define ESW_OFFLOADS_DEVCOM_PAIR (0)
1617#define ESW_OFFLOADS_DEVCOM_UNPAIR (1)
1618
1619static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
1620 struct mlx5_eswitch *peer_esw)
1621{
1622 int err;
1623
1624 err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
1625 if (err)
1626 return err;
1627
1628 return 0;
1629}
1630
1631static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
1632{
04de7dda 1633 mlx5e_tc_clean_fdb_peer_flows(esw);
ac004b83
RD
1634 esw_del_fdb_peer_miss_rules(esw);
1635}
1636
8463daf1
MG
1637static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
1638 struct mlx5_eswitch *peer_esw,
1639 bool pair)
1640{
1641 struct mlx5_flow_root_namespace *peer_ns;
1642 struct mlx5_flow_root_namespace *ns;
1643 int err;
1644
1645 peer_ns = peer_esw->dev->priv.steering->fdb_root_ns;
1646 ns = esw->dev->priv.steering->fdb_root_ns;
1647
1648 if (pair) {
1649 err = mlx5_flow_namespace_set_peer(ns, peer_ns);
1650 if (err)
1651 return err;
1652
e53e6655 1653 err = mlx5_flow_namespace_set_peer(peer_ns, ns);
8463daf1
MG
1654 if (err) {
1655 mlx5_flow_namespace_set_peer(ns, NULL);
1656 return err;
1657 }
1658 } else {
1659 mlx5_flow_namespace_set_peer(ns, NULL);
1660 mlx5_flow_namespace_set_peer(peer_ns, NULL);
1661 }
1662
1663 return 0;
1664}
1665
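/* Devcom event handler. On PAIR: require both e-switches to agree on
 * vport match metadata, cross-set the FDB namespaces, install peer miss
 * rules in both directions, then mark the pair active. On UNPAIR: tear
 * the same state down in reverse order.
 */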
ac004b83
RD
1666static int mlx5_esw_offloads_devcom_event(int event,
1667 void *my_data,
1668 void *event_data)
1669{
1670 struct mlx5_eswitch *esw = my_data;
ac004b83 1671 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
8463daf1 1672 struct mlx5_eswitch *peer_esw = event_data;
ac004b83
RD
1673 int err;
1674
1675 switch (event) {
1676 case ESW_OFFLOADS_DEVCOM_PAIR:
a5641cb5
JL
1677 if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
1678 mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
1679 break;
1680
8463daf1 1681 err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
ac004b83
RD
1682 if (err)
1683 goto err_out;
8463daf1
MG
1684 err = mlx5_esw_offloads_pair(esw, peer_esw);
1685 if (err)
1686 goto err_peer;
ac004b83
RD
1687
1688 err = mlx5_esw_offloads_pair(peer_esw, esw);
1689 if (err)
1690 goto err_pair;
1691
1692 mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
1693 break;
1694
1695 case ESW_OFFLOADS_DEVCOM_UNPAIR:
1696 if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
1697 break;
1698
1699 mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
1700 mlx5_esw_offloads_unpair(peer_esw);
1701 mlx5_esw_offloads_unpair(esw);
8463daf1 1702 mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
ac004b83
RD
1703 break;
1704 }
1705
1706 return 0;
1707
1708err_pair:
1709 mlx5_esw_offloads_unpair(esw);
8463daf1
MG
1710err_peer:
1711 mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
ac004b83
RD
1712err_out:
1713 mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
1714 event, err);
1715 return err;
1716}
1717
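/* Register for devcom pairing events and announce ourselves to the peer;
 * a no-op unless the device reports the merged_eswitch capability.
 */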
1718static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
1719{
1720 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
1721
04de7dda
RD
1722 INIT_LIST_HEAD(&esw->offloads.peer_flows);
1723 mutex_init(&esw->offloads.peer_mutex);
1724
ac004b83
RD
1725 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
1726 return;
1727
1728 mlx5_devcom_register_component(devcom,
1729 MLX5_DEVCOM_ESW_OFFLOADS,
1730 mlx5_esw_offloads_devcom_event,
1731 esw);
1732
1733 mlx5_devcom_send_event(devcom,
1734 MLX5_DEVCOM_ESW_OFFLOADS,
1735 ESW_OFFLOADS_DEVCOM_PAIR, esw);
1736}
1737
1738static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
1739{
1740 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
1741
1742 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
1743 return;
1744
1745 mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
1746 ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
1747
1748 mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
1749}
1750
18486737
EB
1751static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
1752 struct mlx5_vport *vport)
1753{
18486737
EB
1754 struct mlx5_flow_act flow_act = {0};
1755 struct mlx5_flow_spec *spec;
1756 int err = 0;
1757
 1758 /* For prio tag mode, there is only 1 FTE:
7445cfb1
JL
1759 * 1) Untagged packets - push prio tag VLAN and modify metadata if
1760 * required, allow
18486737
EB
1761 * Unmatched traffic is allowed by default
1762 */
1763
18486737
EB
1764 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1765 if (!spec) {
1766 err = -ENOMEM;
1767 goto out_no_mem;
1768 }
1769
1770 /* Untagged packets - push prio tag VLAN, allow */
1771 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
1772 MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 0);
1773 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1774 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
1775 MLX5_FLOW_CONTEXT_ACTION_ALLOW;
1776 flow_act.vlan[0].ethtype = ETH_P_8021Q;
1777 flow_act.vlan[0].vid = 0;
1778 flow_act.vlan[0].prio = 0;
7445cfb1 1779
d68316b5 1780 if (vport->ingress.offloads.modify_metadata_rule) {
7445cfb1 1781 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
d68316b5 1782 flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
7445cfb1
JL
1783 }
1784
18486737
EB
1785 vport->ingress.allow_rule =
1786 mlx5_add_flow_rules(vport->ingress.acl, spec,
1787 &flow_act, NULL, 0);
1788 if (IS_ERR(vport->ingress.allow_rule)) {
1789 err = PTR_ERR(vport->ingress.allow_rule);
1790 esw_warn(esw->dev,
1791 "vport[%d] configure ingress untagged allow rule, err(%d)\n",
1792 vport->vport, err);
1793 vport->ingress.allow_rule = NULL;
1794 goto out;
1795 }
1796
1797out:
1798 kvfree(spec);
1799out_no_mem:
1800 if (err)
1801 esw_vport_cleanup_ingress_rules(esw, vport);
1802 return err;
1803}
1804
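/* Allocate a modify-header action that writes this vport's match metadata
 * into REG_C_0 and install a match-all ingress rule that applies it, so
 * later steering stages can identify the traffic's source vport.
 */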
7445cfb1
JL
1805static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
1806 struct mlx5_vport *vport)
1807{
1808 u8 action[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {};
9446d17e 1809 static const struct mlx5_flow_spec spec = {};
7445cfb1 1810 struct mlx5_flow_act flow_act = {};
7445cfb1
JL
1811 int err = 0;
1812
1813 MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
1814 MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
1815 MLX5_SET(set_action_in, action, data,
1816 mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport));
1817
d68316b5 1818 vport->ingress.offloads.modify_metadata =
2b688ea5
MG
1819 mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
1820 1, action);
d68316b5
PP
1821 if (IS_ERR(vport->ingress.offloads.modify_metadata)) {
1822 err = PTR_ERR(vport->ingress.offloads.modify_metadata);
7445cfb1
JL
1823 esw_warn(esw->dev,
1824 "failed to alloc modify header for vport %d ingress acl (%d)\n",
1825 vport->vport, err);
1826 return err;
1827 }
1828
1829 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
d68316b5
PP
1830 flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
1831 vport->ingress.offloads.modify_metadata_rule =
1832 mlx5_add_flow_rules(vport->ingress.acl,
1833 &spec, &flow_act, NULL, 0);
1834 if (IS_ERR(vport->ingress.offloads.modify_metadata_rule)) {
1835 err = PTR_ERR(vport->ingress.offloads.modify_metadata_rule);
7445cfb1
JL
1836 esw_warn(esw->dev,
1837 "failed to add setting metadata rule for vport %d ingress acl, err(%d)\n",
1838 vport->vport, err);
d68316b5 1839 vport->ingress.offloads.modify_metadata_rule = NULL;
7445cfb1
JL
1840 goto out;
1841 }
1842
1843out:
1844 if (err)
d68316b5 1845 mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
7445cfb1
JL
1846 return err;
1847}
1848
a962d7a6
PP
1849static void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
1850 struct mlx5_vport *vport)
7445cfb1 1851{
d68316b5
PP
1852 if (vport->ingress.offloads.modify_metadata_rule) {
1853 mlx5_del_flow_rules(vport->ingress.offloads.modify_metadata_rule);
1854 mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
7445cfb1 1855
d68316b5 1856 vport->ingress.offloads.modify_metadata_rule = NULL;
7445cfb1
JL
1857 }
1858}
1859
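/* Create the flow group (spanning flow index 0 only) that holds the
 * vport ingress ACL rules.
 */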
10652f39
PP
1860static int esw_vport_create_ingress_acl_group(struct mlx5_eswitch *esw,
1861 struct mlx5_vport *vport)
18486737 1862{
10652f39
PP
1863 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1864 struct mlx5_flow_group *g;
1865 u32 *flow_group_in;
1866 int ret = 0;
18486737 1867
10652f39
PP
1868 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
1869 if (!flow_group_in)
1870 return -ENOMEM;
18486737 1871
10652f39
PP
1872 memset(flow_group_in, 0, inlen);
1873 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
1874 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
18486737 1875
10652f39
PP
1876 g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
1877 if (IS_ERR(g)) {
1878 ret = PTR_ERR(g);
18486737 1879 esw_warn(esw->dev,
9ea7f01f 1880 "Failed to create vport[%d] ingress metadata group, err(%d)\n",
10652f39
PP
1881 vport->vport, ret);
1882 goto grp_err;
18486737 1883 }
10652f39
PP
1884 vport->ingress.offloads.metadata_grp = g;
1885grp_err:
1886 kvfree(flow_group_in);
1887 return ret;
1888}
18486737 1889
10652f39
PP
1890static void esw_vport_destroy_ingress_acl_group(struct mlx5_vport *vport)
1891{
1892 if (vport->ingress.offloads.metadata_grp) {
1893 mlx5_destroy_flow_group(vport->ingress.offloads.metadata_grp);
1894 vport->ingress.offloads.metadata_grp = NULL;
1895 }
18486737
EB
1896}
1897
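/* Set up the vport ingress ACL: create the table and group, then install
 * the metadata rewrite rule and/or the VF prio-tag rule according to the
 * enabled features, unwinding everything in reverse order on failure.
 */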
b1a3380a
VP
1898static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
1899 struct mlx5_vport *vport)
18486737 1900{
18486737
EB
1901 int err;
1902
7445cfb1
JL
1903 if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
1904 !MLX5_CAP_GEN(esw->dev, prio_tag_required))
1905 return 0;
1906
1907 esw_vport_cleanup_ingress_rules(esw, vport);
10652f39 1908 err = esw_vport_create_ingress_acl_table(esw, vport, 1);
7445cfb1
JL
1909 if (err) {
1910 esw_warn(esw->dev,
1911 "failed to enable ingress acl (%d) on vport[%d]\n",
1912 err, vport->vport);
1913 return err;
1914 }
1915
10652f39
PP
1916 err = esw_vport_create_ingress_acl_group(esw, vport);
1917 if (err)
1918 goto group_err;
1919
7445cfb1
JL
1920 esw_debug(esw->dev,
1921 "vport[%d] configure ingress rules\n", vport->vport);
1922
1923 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
1924 err = esw_vport_add_ingress_acl_modify_metadata(esw, vport);
1925 if (err)
10652f39 1926 goto metadata_err;
7445cfb1
JL
1927 }
1928
1929 if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
1930 mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
786ef904 1931 err = esw_vport_ingress_prio_tag_config(esw, vport);
18486737 1932 if (err)
10652f39 1933 goto prio_tag_err;
7445cfb1 1934 }
10652f39 1935 return 0;
7445cfb1 1936
10652f39
PP
1937prio_tag_err:
1938 esw_vport_del_ingress_acl_modify_metadata(esw, vport);
1939metadata_err:
1940 esw_vport_cleanup_ingress_rules(esw, vport);
1941 esw_vport_destroy_ingress_acl_group(vport);
1942group_err:
1943 esw_vport_destroy_ingress_acl_table(vport);
7445cfb1
JL
1944 return err;
1945}
1946
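/* Egress ACL, needed only for prio-tag mode: pop the prio tag VLAN so
 * the VF receives untagged packets.
 */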
6d94e610
VP
1947static int esw_vport_egress_config(struct mlx5_eswitch *esw,
1948 struct mlx5_vport *vport)
1949{
1950 int err;
1951
1952 if (!MLX5_CAP_GEN(esw->dev, prio_tag_required))
1953 return 0;
1954
1955 esw_vport_cleanup_egress_rules(esw, vport);
1956
1957 err = esw_vport_enable_egress_acl(esw, vport);
1958 if (err)
1959 return err;
1960
fdde49e0
PP
 1961 /* For prio tag mode, there is only 1 FTE:
1962 * 1) prio tag packets - pop the prio tag VLAN, allow
1963 * Unmatched traffic is allowed by default
1964 */
1965 esw_debug(esw->dev,
1966 "vport[%d] configure prio tag egress rules\n", vport->vport);
6d94e610 1967
fdde49e0
PP
 1968 /* prio tag VLAN rule - pop it so the VF receives untagged packets */
1969 err = mlx5_esw_create_vport_egress_acl_vlan(esw, vport, 0,
1970 MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
1971 MLX5_FLOW_CONTEXT_ACTION_ALLOW);
7445cfb1 1972 if (err)
6d94e610
VP
1973 esw_vport_disable_egress_acl(esw, vport);
1974
7445cfb1
JL
1975 return err;
1976}
1977
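/* Vport match metadata requires uplink ingress ACL support, REG_C_0
 * propagation from the FDB to the vport, and the flow_source capability;
 * it is not used when an ECPF vport is present.
 */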
92ab1eb3
JL
1978static bool
1979esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
1980{
1981 if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
1982 return false;
1983
1984 if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
1985 MLX5_FDB_TO_VPORT_REG_C_0))
1986 return false;
1987
1988 if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
1989 return false;
1990
1991 if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
1992 mlx5_ecpf_vport_exists(esw->dev))
1993 return false;
1994
1995 return true;
1996}
1997
748da30b 1998int
89a0f1fb
PP
1999esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
2000 struct mlx5_vport *vport)
7445cfb1 2001{
7445cfb1
JL
2002 int err;
2003
89a0f1fb
PP
2004 err = esw_vport_ingress_config(esw, vport);
2005 if (err)
2006 return err;
7445cfb1 2007
89a0f1fb
PP
2008 if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
2009 err = esw_vport_egress_config(esw, vport);
a962d7a6
PP
2010 if (err) {
2011 esw_vport_del_ingress_acl_modify_metadata(esw, vport);
10652f39
PP
2012 esw_vport_cleanup_ingress_rules(esw, vport);
2013 esw_vport_destroy_ingress_acl_table(vport);
7445cfb1 2014 }
18486737 2015 }
89a0f1fb
PP
2016 return err;
2017}
18486737 2018
748da30b 2019void
89a0f1fb
PP
2020esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
2021 struct mlx5_vport *vport)
2022{
2023 esw_vport_disable_egress_acl(esw, vport);
a962d7a6 2024 esw_vport_del_ingress_acl_modify_metadata(esw, vport);
10652f39
PP
2025 esw_vport_cleanup_ingress_rules(esw, vport);
2026 esw_vport_destroy_ingress_acl_group(vport);
2027 esw_vport_destroy_ingress_acl_table(vport);
89a0f1fb 2028}
7445cfb1 2029
748da30b 2030static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
7445cfb1
JL
2031{
2032 struct mlx5_vport *vport;
7445cfb1 2033 int err;
18486737 2034
92ab1eb3
JL
2035 if (esw_check_vport_match_metadata_supported(esw))
2036 esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
18486737 2037
748da30b
VP
2038 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
2039 err = esw_vport_create_offloads_acl_tables(esw, vport);
2040 if (err)
2041 esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
18486737
EB
2042 return err;
2043}
2044
748da30b 2045static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
18486737 2046{
786ef904 2047 struct mlx5_vport *vport;
7445cfb1 2048
748da30b
VP
2049 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
2050 esw_vport_destroy_offloads_acl_tables(esw, vport);
7445cfb1 2051 esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
18486737
EB
2052}
2053
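/* Create the offloads steering objects in order: uplink ACL tables, the
 * FDB tables, the offloads table and the vport RX group, unwinding in
 * reverse order if any step fails.
 */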
062f4bf4 2054static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
6ed1803a 2055{
062f4bf4
BW
2056 int num_vfs = esw->esw_funcs.num_vfs;
2057 int total_vports;
6ed1803a
MB
2058 int err;
2059
062f4bf4
BW
2060 if (mlx5_core_is_ecpf_esw_manager(esw->dev))
2061 total_vports = esw->total_vports;
2062 else
2063 total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev);
2064
5c1d260e 2065 memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
e52c2802
PB
2066 mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
2067
748da30b 2068 err = esw_create_uplink_offloads_acl_tables(esw);
7445cfb1
JL
2069 if (err)
2070 return err;
18486737 2071
062f4bf4 2072 err = esw_create_offloads_fdb_tables(esw, total_vports);
c930a3ad 2073 if (err)
7445cfb1 2074 goto create_fdb_err;
c930a3ad 2075
062f4bf4 2076 err = esw_create_offloads_table(esw, total_vports);
c930a3ad
OG
2077 if (err)
2078 goto create_ft_err;
2079
062f4bf4 2080 err = esw_create_vport_rx_group(esw, total_vports);
c930a3ad
OG
2081 if (err)
2082 goto create_fg_err;
2083
2084 return 0;
2085
2086create_fg_err:
2087 esw_destroy_offloads_table(esw);
2088
2089create_ft_err:
1967ce6e 2090 esw_destroy_offloads_fdb_tables(esw);
5bae8c03 2091
7445cfb1 2092create_fdb_err:
748da30b 2093 esw_destroy_uplink_offloads_acl_tables(esw);
7445cfb1 2094
c930a3ad
OG
2095 return err;
2096}
2097
eca8cc38
BW
2098static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
2099{
2100 esw_destroy_vport_rx_group(esw);
2101 esw_destroy_offloads_table(esw);
2102 esw_destroy_offloads_fdb_tables(esw);
748da30b 2103 esw_destroy_uplink_offloads_acl_tables(esw);
eca8cc38
BW
2104}
2105
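/* React to a change in the host's VF count as reported by firmware: the
 * count only moves between 0 and some fixed x, so either unload all VF
 * reps or load reps for the new VFs.
 */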
7e736f9a
PP
2106static void
2107esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
a3888f33 2108{
5ccf2770 2109 bool host_pf_disabled;
7e736f9a 2110 u16 new_num_vfs;
a3888f33 2111
7e736f9a
PP
2112 new_num_vfs = MLX5_GET(query_esw_functions_out, out,
2113 host_params_context.host_num_of_vfs);
5ccf2770
BW
2114 host_pf_disabled = MLX5_GET(query_esw_functions_out, out,
2115 host_params_context.host_pf_disabled);
a3888f33 2116
7e736f9a
PP
2117 if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
2118 return;
a3888f33
BW
2119
2120 /* Number of VFs can only change from "0 to x" or "x to 0". */
cd56f929
VP
2121 if (esw->esw_funcs.num_vfs > 0) {
2122 esw_offloads_unload_vf_reps(esw, esw->esw_funcs.num_vfs);
a3888f33 2123 } else {
7e736f9a 2124 int err;
a3888f33 2125
7e736f9a 2126 err = esw_offloads_load_vf_reps(esw, new_num_vfs);
a3888f33 2127 if (err)
7e736f9a 2128 return;
a3888f33 2129 }
7e736f9a 2130 esw->esw_funcs.num_vfs = new_num_vfs;
a3888f33
BW
2131}
2132
7e736f9a 2133static void esw_functions_changed_event_handler(struct work_struct *work)
ac35dcd6 2134{
7e736f9a
PP
2135 struct mlx5_host_work *host_work;
2136 struct mlx5_eswitch *esw;
dd28087c 2137 const u32 *out;
ac35dcd6 2138
7e736f9a
PP
2139 host_work = container_of(work, struct mlx5_host_work, work);
2140 esw = host_work->esw;
a3888f33 2141
dd28087c
PP
2142 out = mlx5_esw_query_functions(esw->dev);
2143 if (IS_ERR(out))
7e736f9a 2144 goto out;
a3888f33 2145
7e736f9a 2146 esw_vfs_changed_event_handler(esw, out);
dd28087c 2147 kvfree(out);
a3888f33 2148out:
ac35dcd6
VP
2149 kfree(host_work);
2150}
2151
16fff98a 2152int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data)
a3888f33 2153{
cd56f929 2154 struct mlx5_esw_functions *esw_funcs;
a3888f33 2155 struct mlx5_host_work *host_work;
a3888f33
BW
2156 struct mlx5_eswitch *esw;
2157
2158 host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
2159 if (!host_work)
2160 return NOTIFY_DONE;
2161
cd56f929
VP
2162 esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
2163 esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);
a3888f33
BW
2164
2165 host_work->esw = esw;
2166
062f4bf4 2167 INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
a3888f33
BW
2168 queue_work(esw->work_queue, &host_work->work);
2169
2170 return NOTIFY_OK;
2171}
2172
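/* Enable switchdev offloads: choose the default encap mode from FW caps,
 * enable RoCE, build the steering objects, turn on vport metadata
 * passing, enable the PF/VF vports and load all representors.
 */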
5896b972 2173int esw_offloads_enable(struct mlx5_eswitch *esw)
eca8cc38
BW
2174{
2175 int err;
2176
9a64144d
MG
2177 if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
2178 MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
2179 esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
2180 else
2181 esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
2182
8463daf1 2183 mlx5_rdma_enable_roce(esw->dev);
062f4bf4 2184 err = esw_offloads_steering_init(esw);
eca8cc38 2185 if (err)
8463daf1 2186 goto err_steering_init;
eca8cc38 2187
332bd3a5
PP
2188 err = esw_set_passing_vport_metadata(esw, true);
2189 if (err)
2190 goto err_vport_metadata;
c1286050 2191
925a6acc
PP
2192 err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
2193 if (err)
2194 goto err_vports;
c1286050 2195
062f4bf4 2196 err = esw_offloads_load_all_reps(esw);
eca8cc38
BW
2197 if (err)
2198 goto err_reps;
2199
2200 esw_offloads_devcom_init(esw);
10caabda 2201 mutex_init(&esw->offloads.termtbl_mutex);
a3888f33 2202
eca8cc38
BW
2203 return 0;
2204
2205err_reps:
5896b972 2206 mlx5_eswitch_disable_pf_vf_vports(esw);
925a6acc 2207err_vports:
332bd3a5 2208 esw_set_passing_vport_metadata(esw, false);
c1286050 2209err_vport_metadata:
eca8cc38 2210 esw_offloads_steering_cleanup(esw);
8463daf1
MG
2211err_steering_init:
2212 mlx5_rdma_disable_roce(esw->dev);
eca8cc38
BW
2213 return err;
2214}
2215
db7ff19e
EB
2216static int esw_offloads_stop(struct mlx5_eswitch *esw,
2217 struct netlink_ext_ack *extack)
c930a3ad 2218{
062f4bf4 2219 int err, err1;
c930a3ad 2220
556b9d16 2221 mlx5_eswitch_disable(esw, false);
062f4bf4 2222 err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
6c419ba8 2223 if (err) {
8c98ee77 2224 NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
062f4bf4 2225 err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
8c98ee77
EB
2226 if (err1) {
2227 NL_SET_ERR_MSG_MOD(extack,
2228 "Failed setting eswitch back to offloads");
2229 }
6c419ba8 2230 }
c930a3ad
OG
2231
2232 return err;
2233}
2234
5896b972 2235void esw_offloads_disable(struct mlx5_eswitch *esw)
c930a3ad 2236{
ac004b83 2237 esw_offloads_devcom_cleanup(esw);
062f4bf4 2238 esw_offloads_unload_all_reps(esw);
5896b972 2239 mlx5_eswitch_disable_pf_vf_vports(esw);
332bd3a5 2240 esw_set_passing_vport_metadata(esw, false);
eca8cc38 2241 esw_offloads_steering_cleanup(esw);
8463daf1 2242 mlx5_rdma_disable_roce(esw->dev);
9a64144d 2243 esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
c930a3ad
OG
2244}
2245
ef78618b 2246static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
c930a3ad
OG
2247{
2248 switch (mode) {
2249 case DEVLINK_ESWITCH_MODE_LEGACY:
f6455de0 2250 *mlx5_mode = MLX5_ESWITCH_LEGACY;
c930a3ad
OG
2251 break;
2252 case DEVLINK_ESWITCH_MODE_SWITCHDEV:
f6455de0 2253 *mlx5_mode = MLX5_ESWITCH_OFFLOADS;
c930a3ad
OG
2254 break;
2255 default:
2256 return -EINVAL;
2257 }
2258
2259 return 0;
2260}
2261
ef78618b
OG
2262static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
2263{
2264 switch (mlx5_mode) {
f6455de0 2265 case MLX5_ESWITCH_LEGACY:
ef78618b
OG
2266 *mode = DEVLINK_ESWITCH_MODE_LEGACY;
2267 break;
f6455de0 2268 case MLX5_ESWITCH_OFFLOADS:
ef78618b
OG
2269 *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
2270 break;
2271 default:
2272 return -EINVAL;
2273 }
2274
2275 return 0;
2276}
2277
bffaa916
RD
2278static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
2279{
2280 switch (mode) {
2281 case DEVLINK_ESWITCH_INLINE_MODE_NONE:
2282 *mlx5_mode = MLX5_INLINE_MODE_NONE;
2283 break;
2284 case DEVLINK_ESWITCH_INLINE_MODE_LINK:
2285 *mlx5_mode = MLX5_INLINE_MODE_L2;
2286 break;
2287 case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
2288 *mlx5_mode = MLX5_INLINE_MODE_IP;
2289 break;
2290 case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
2291 *mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
2292 break;
2293 default:
2294 return -EINVAL;
2295 }
2296
2297 return 0;
2298}
2299
2300static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
2301{
2302 switch (mlx5_mode) {
2303 case MLX5_INLINE_MODE_NONE:
2304 *mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
2305 break;
2306 case MLX5_INLINE_MODE_L2:
2307 *mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
2308 break;
2309 case MLX5_INLINE_MODE_IP:
2310 *mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
2311 break;
2312 case MLX5_INLINE_MODE_TCP_UDP:
2313 *mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
2314 break;
2315 default:
2316 return -EINVAL;
2317 }
2318
2319 return 0;
2320}
2321
9d1cef19 2322static int mlx5_devlink_eswitch_check(struct devlink *devlink)
feae9087 2323{
9d1cef19 2324 struct mlx5_core_dev *dev = devlink_priv(devlink);
c930a3ad 2325
9d1cef19
OG
2326 if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
2327 return -EOPNOTSUPP;
c930a3ad 2328
733d3e54
OG
 2329 if (!MLX5_ESWITCH_MANAGER(dev))
2330 return -EPERM;
c930a3ad 2331
f6455de0 2332 if (dev->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
c96692fb 2333 !mlx5_core_is_ecpf_esw_manager(dev))
c930a3ad
OG
2334 return -EOPNOTSUPP;
2335
9d1cef19
OG
2336 return 0;
2337}
2338
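/* devlink callback: switch the e-switch between legacy and switchdev
 * (offloads) modes after validating the device state.
 */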
db7ff19e
EB
2339int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
2340 struct netlink_ext_ack *extack)
9d1cef19
OG
2341{
2342 struct mlx5_core_dev *dev = devlink_priv(devlink);
2343 u16 cur_mlx5_mode, mlx5_mode = 0;
2344 int err;
2345
2346 err = mlx5_devlink_eswitch_check(devlink);
2347 if (err)
2348 return err;
2349
2350 cur_mlx5_mode = dev->priv.eswitch->mode;
2351
ef78618b 2352 if (esw_mode_from_devlink(mode, &mlx5_mode))
c930a3ad
OG
2353 return -EINVAL;
2354
2355 if (cur_mlx5_mode == mlx5_mode)
2356 return 0;
2357
2358 if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
db7ff19e 2359 return esw_offloads_start(dev->priv.eswitch, extack);
c930a3ad 2360 else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
db7ff19e 2361 return esw_offloads_stop(dev->priv.eswitch, extack);
c930a3ad
OG
2362 else
2363 return -EINVAL;
feae9087
OG
2364}
2365
2366int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
2367{
9d1cef19
OG
2368 struct mlx5_core_dev *dev = devlink_priv(devlink);
2369 int err;
c930a3ad 2370
9d1cef19
OG
2371 err = mlx5_devlink_eswitch_check(devlink);
2372 if (err)
2373 return err;
c930a3ad 2374
ef78618b 2375 return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
feae9087 2376}
127ea380 2377
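/* devlink callback: apply a new min-inline mode to every host-function
 * vport. Refused while offloaded flows exist or when the inline mode is
 * fixed by the device; reverts already-updated vports on error.
 */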
db7ff19e
EB
2378int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
2379 struct netlink_ext_ack *extack)
bffaa916
RD
2380{
2381 struct mlx5_core_dev *dev = devlink_priv(devlink);
2382 struct mlx5_eswitch *esw = dev->priv.eswitch;
db68cc56 2383 int err, vport, num_vport;
bffaa916
RD
2384 u8 mlx5_mode;
2385
9d1cef19
OG
2386 err = mlx5_devlink_eswitch_check(devlink);
2387 if (err)
2388 return err;
bffaa916 2389
c415f704
OG
2390 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
2391 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
2392 if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
2393 return 0;
2394 /* fall through */
2395 case MLX5_CAP_INLINE_MODE_L2:
8c98ee77 2396 NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
bffaa916 2397 return -EOPNOTSUPP;
c415f704
OG
2398 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
2399 break;
2400 }
bffaa916 2401
525e84be 2402 if (atomic64_read(&esw->offloads.num_flows) > 0) {
8c98ee77
EB
2403 NL_SET_ERR_MSG_MOD(extack,
2404 "Can't set inline mode when flows are configured");
375f51e2
RD
2405 return -EOPNOTSUPP;
2406 }
2407
bffaa916
RD
2408 err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
2409 if (err)
2410 goto out;
2411
411ec9e0 2412 mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
bffaa916
RD
2413 err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
2414 if (err) {
8c98ee77
EB
2415 NL_SET_ERR_MSG_MOD(extack,
2416 "Failed to set min inline on vport");
bffaa916
RD
2417 goto revert_inline_mode;
2418 }
2419 }
2420
2421 esw->offloads.inline_mode = mlx5_mode;
2422 return 0;
2423
2424revert_inline_mode:
db68cc56 2425 num_vport = --vport;
411ec9e0 2426 mlx5_esw_for_each_host_func_vport_reverse(esw, vport, num_vport)
bffaa916
RD
2427 mlx5_modify_nic_vport_min_inline(dev,
2428 vport,
2429 esw->offloads.inline_mode);
2430out:
2431 return err;
2432}
2433
2434int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
2435{
2436 struct mlx5_core_dev *dev = devlink_priv(devlink);
2437 struct mlx5_eswitch *esw = dev->priv.eswitch;
9d1cef19 2438 int err;
bffaa916 2439
9d1cef19
OG
2440 err = mlx5_devlink_eswitch_check(devlink);
2441 if (err)
2442 return err;
bffaa916 2443
bffaa916
RD
2444 return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
2445}
2446
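/* Report the effective min-inline mode: taken directly from the device
 * capability when it is fixed, otherwise queried per host-function
 * vport, in which case all vports must agree.
 */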
062f4bf4 2447int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
bffaa916 2448{
c415f704 2449 u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
bffaa916
RD
2450 struct mlx5_core_dev *dev = esw->dev;
2451 int vport;
bffaa916
RD
2452
2453 if (!MLX5_CAP_GEN(dev, vport_group_manager))
2454 return -EOPNOTSUPP;
2455
f6455de0 2456 if (esw->mode == MLX5_ESWITCH_NONE)
bffaa916
RD
2457 return -EOPNOTSUPP;
2458
c415f704
OG
2459 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
2460 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
2461 mlx5_mode = MLX5_INLINE_MODE_NONE;
2462 goto out;
2463 case MLX5_CAP_INLINE_MODE_L2:
2464 mlx5_mode = MLX5_INLINE_MODE_L2;
2465 goto out;
2466 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
2467 goto query_vports;
2468 }
bffaa916 2469
c415f704 2470query_vports:
411ec9e0
BW
2471 mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
2472 mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
bffaa916 2473 mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
411ec9e0 2474 if (prev_mlx5_mode != mlx5_mode)
bffaa916
RD
2475 return -EINVAL;
2476 prev_mlx5_mode = mlx5_mode;
2477 }
2478
c415f704 2479out:
bffaa916
RD
2480 *mode = mlx5_mode;
2481 return 0;
2482}
2483
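/* devlink callback: toggle encap support. In offloads mode this requires
 * recreating the FDB tables, so it is refused while flows are installed.
 */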
98fdbea5
LR
2484int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
2485 enum devlink_eswitch_encap_mode encap,
db7ff19e 2486 struct netlink_ext_ack *extack)
7768d197
RD
2487{
2488 struct mlx5_core_dev *dev = devlink_priv(devlink);
2489 struct mlx5_eswitch *esw = dev->priv.eswitch;
2490 int err;
2491
9d1cef19
OG
2492 err = mlx5_devlink_eswitch_check(devlink);
2493 if (err)
2494 return err;
7768d197
RD
2495
2496 if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
60786f09 2497 (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
7768d197
RD
2498 !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
2499 return -EOPNOTSUPP;
2500
2501 if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
2502 return -EOPNOTSUPP;
2503
f6455de0 2504 if (esw->mode == MLX5_ESWITCH_LEGACY) {
7768d197
RD
2505 esw->offloads.encap = encap;
2506 return 0;
2507 }
2508
2509 if (esw->offloads.encap == encap)
2510 return 0;
2511
525e84be 2512 if (atomic64_read(&esw->offloads.num_flows) > 0) {
8c98ee77
EB
2513 NL_SET_ERR_MSG_MOD(extack,
2514 "Can't set encapsulation when flows are configured");
7768d197
RD
2515 return -EOPNOTSUPP;
2516 }
2517
e52c2802 2518 esw_destroy_offloads_fdb_tables(esw);
7768d197
RD
2519
2520 esw->offloads.encap = encap;
e52c2802
PB
2521
2522 err = esw_create_offloads_fdb_tables(esw, esw->nvports);
2523
7768d197 2524 if (err) {
8c98ee77
EB
2525 NL_SET_ERR_MSG_MOD(extack,
2526 "Failed re-creating fast FDB table");
7768d197 2527 esw->offloads.encap = !encap;
e52c2802 2528 (void)esw_create_offloads_fdb_tables(esw, esw->nvports);
7768d197 2529 }
e52c2802 2530
7768d197
RD
2531 return err;
2532}
2533
98fdbea5
LR
2534int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
2535 enum devlink_eswitch_encap_mode *encap)
7768d197
RD
2536{
2537 struct mlx5_core_dev *dev = devlink_priv(devlink);
2538 struct mlx5_eswitch *esw = dev->priv.eswitch;
9d1cef19 2539 int err;
7768d197 2540
9d1cef19
OG
2541 err = mlx5_devlink_eswitch_check(devlink);
2542 if (err)
2543 return err;
7768d197
RD
2544
2545 *encap = esw->offloads.encap;
2546 return 0;
2547}
2548
f8e8fa02 2549void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
8693115a 2550 const struct mlx5_eswitch_rep_ops *ops,
f8e8fa02 2551 u8 rep_type)
127ea380 2552{
8693115a 2553 struct mlx5_eswitch_rep_data *rep_data;
f8e8fa02
BW
2554 struct mlx5_eswitch_rep *rep;
2555 int i;
9deb2241 2556
8693115a 2557 esw->offloads.rep_ops[rep_type] = ops;
f8e8fa02 2558 mlx5_esw_for_all_reps(esw, i, rep) {
8693115a
PP
2559 rep_data = &rep->rep_data[rep_type];
2560 atomic_set(&rep_data->state, REP_REGISTERED);
f8e8fa02 2561 }
127ea380 2562}
f8e8fa02 2563EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
127ea380 2564
f8e8fa02 2565void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
127ea380 2566{
cb67b832 2567 struct mlx5_eswitch_rep *rep;
f8e8fa02 2568 int i;
cb67b832 2569
f6455de0 2570 if (esw->mode == MLX5_ESWITCH_OFFLOADS)
062f4bf4 2571 __unload_reps_all_vport(esw, rep_type);
127ea380 2572
f8e8fa02 2573 mlx5_esw_for_all_reps(esw, i, rep)
8693115a 2574 atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
127ea380 2575}
f8e8fa02 2576EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
726293f1 2577
a4b97ab4 2578void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
726293f1 2579{
726293f1
HHZ
2580 struct mlx5_eswitch_rep *rep;
2581
879c8f84 2582 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
8693115a 2583 return rep->rep_data[rep_type].priv;
726293f1 2584}
22215908
MB
2585
2586void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
02f3afd9 2587 u16 vport,
22215908
MB
2588 u8 rep_type)
2589{
22215908
MB
2590 struct mlx5_eswitch_rep *rep;
2591
879c8f84 2592 rep = mlx5_eswitch_get_rep(esw, vport);
22215908 2593
8693115a
PP
2594 if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
2595 esw->offloads.rep_ops[rep_type]->get_proto_dev)
2596 return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
22215908
MB
2597 return NULL;
2598}
57cbd893 2599EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);
22215908
MB
2600
2601void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
2602{
879c8f84 2603 return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
22215908 2604}
57cbd893
MB
2605EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);
2606
2607struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
02f3afd9 2608 u16 vport)
57cbd893 2609{
879c8f84 2610 return mlx5_eswitch_get_rep(esw, vport);
57cbd893
MB
2611}
2612EXPORT_SYMBOL(mlx5_eswitch_vport_rep);
91d6291c
PP
2613
2614bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
2615{
2616 return vport_num >= MLX5_VPORT_FIRST_VF &&
2617 vport_num <= esw->dev->priv.sriov.max_vfs;
2618}
7445cfb1
JL
2619
2620bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
2621{
2622 return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
2623}
2624EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);
2625
2626u32 mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw,
2627 u16 vport_num)
2628{
2629 return ((MLX5_CAP_GEN(esw->dev, vhca_id) & 0xffff) << 16) | vport_num;
2630}
2631EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);