/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/idr.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "esw/indir_table.h"
#include "esw/acl/ofld.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
#include "lib/eq.h"
#include "lib/fs_chains.h"
#include "en_tc.h"

/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)
#define UPLINK_REP_INDEX 0

static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
						     u16 vport_num)
{
	int idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);

	WARN_ON(idx > esw->total_vports - 1);
	return &esw->offloads.vport_reps[idx];
}

static void
mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_esw_flow_attr *attr)
{
	if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
	    attr && attr->in_rep)
		spec->flow_context.flow_source =
			attr->in_rep->vport == MLX5_VPORT_UPLINK ?
				MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK :
				MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
}

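/* Set the source port match for a rule. When vport metadata matching is
 * enabled, the source is matched via metadata_reg_c_0; otherwise the rule
 * matches on the source vport number (plus the owning VHCA id on merged
 * eswitch setups).
 */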
static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_attr *attr,
				  struct mlx5_eswitch *src_esw,
				  u16 vport)
{
	void *misc2;
	void *misc;

	/* Use metadata matching because vport is not represented by single
	 * VHCA in dual-port RoCE mode, and matching on source vport may fail.
	 */
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		if (mlx5_esw_indir_table_decap_vport(attr))
			vport = mlx5_esw_indir_table_decap_vport(attr);
		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(src_esw,
								   vport));

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id,
				 MLX5_CAP_GEN(src_esw->dev, vhca_id));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET_TO_ONES(fte_match_set_misc, misc,
					 source_eswitch_owner_vhca_id);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	}
}

static int
esw_setup_decap_indir(struct mlx5_eswitch *esw,
		      struct mlx5_flow_attr *attr,
		      struct mlx5_flow_spec *spec)
{
	struct mlx5_flow_table *ft;

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE))
		return -EOPNOTSUPP;

	ft = mlx5_esw_indir_table_get(esw, attr, spec,
				      mlx5_esw_indir_table_decap_vport(attr), true);
	return PTR_ERR_OR_ZERO(ft);
}

static void
esw_cleanup_decap_indir(struct mlx5_eswitch *esw,
			struct mlx5_flow_attr *attr)
{
	if (mlx5_esw_indir_table_decap_vport(attr))
		mlx5_esw_indir_table_put(esw, attr,
					 mlx5_esw_indir_table_decap_vport(attr),
					 true);
}

static int
esw_setup_ft_dest(struct mlx5_flow_destination *dest,
		  struct mlx5_flow_act *flow_act,
		  struct mlx5_eswitch *esw,
		  struct mlx5_flow_attr *attr,
		  struct mlx5_flow_spec *spec,
		  int i)
{
	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = attr->dest_ft;

	if (mlx5_esw_indir_table_decap_vport(attr))
		return esw_setup_decap_indir(esw, attr, spec);
	return 0;
}

static void
esw_setup_slow_path_dest(struct mlx5_flow_destination *dest,
			 struct mlx5_flow_act *flow_act,
			 struct mlx5_fs_chains *chains,
			 int i)
{
	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
}

static int
esw_setup_chain_dest(struct mlx5_flow_destination *dest,
		     struct mlx5_flow_act *flow_act,
		     struct mlx5_fs_chains *chains,
		     u32 chain, u32 prio, u32 level,
		     int i)
{
	struct mlx5_flow_table *ft;

	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	ft = mlx5_chains_get_table(chains, chain, prio, level);
	if (IS_ERR(ft))
		return PTR_ERR(ft);

	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = ft;
	return 0;
}

static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr,
				     int from, int to)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	int i;

	for (i = from; i < to; i++)
		if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
			mlx5_chains_put_table(chains, 0, 1, 0);
		else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
						     esw_attr->dests[i].mdev))
			mlx5_esw_indir_table_put(esw, attr, esw_attr->dests[i].rep->vport,
						 false);
}

static bool
esw_is_chain_src_port_rewrite(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr)
{
	int i;

	for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
		if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
			return true;
	return false;
}

static int
esw_setup_chain_src_port_rewrite(struct mlx5_flow_destination *dest,
				 struct mlx5_flow_act *flow_act,
				 struct mlx5_eswitch *esw,
				 struct mlx5_fs_chains *chains,
				 struct mlx5_flow_attr *attr,
				 int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	int j, err;

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE))
		return -EOPNOTSUPP;

	for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) {
		err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, 1, 0, *i);
		if (err)
			goto err_setup_chain;
		flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
		flow_act->pkt_reformat = esw_attr->dests[j].pkt_reformat;
	}
	return 0;

err_setup_chain:
	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, j);
	return err;
}

static void esw_cleanup_chain_src_port_rewrite(struct mlx5_eswitch *esw,
					       struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
}

static bool
esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	int i;

	for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
		if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
						esw_attr->dests[i].mdev))
			return true;
	return false;
}

static int
esw_setup_indir_table(struct mlx5_flow_destination *dest,
		      struct mlx5_flow_act *flow_act,
		      struct mlx5_eswitch *esw,
		      struct mlx5_flow_attr *attr,
		      struct mlx5_flow_spec *spec,
		      bool ignore_flow_lvl,
		      int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	int j, err;

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE))
		return -EOPNOTSUPP;

	for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) {
		if (ignore_flow_lvl)
			flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
		dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;

		dest[*i].ft = mlx5_esw_indir_table_get(esw, attr, spec,
						       esw_attr->dests[j].rep->vport, false);
		if (IS_ERR(dest[*i].ft)) {
			err = PTR_ERR(dest[*i].ft);
			goto err_indir_tbl_get;
		}
	}

	if (mlx5_esw_indir_table_decap_vport(attr)) {
		err = esw_setup_decap_indir(esw, attr, spec);
		if (err)
			goto err_indir_tbl_get;
	}

	return 0;

err_indir_tbl_get:
	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, j);
	return err;
}

static void esw_cleanup_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
	esw_cleanup_decap_indir(esw, attr);
}

static void
esw_cleanup_chain_dest(struct mlx5_fs_chains *chains, u32 chain, u32 prio, u32 level)
{
	mlx5_chains_put_table(chains, chain, prio, level);
}

static void
esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
		     struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
		     int attr_idx, int dest_idx, bool pkt_reformat)
{
	dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport;
	dest[dest_idx].vport.vhca_id =
		MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
	if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP) {
		if (pkt_reformat) {
			flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
			flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
		}
		dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
		dest[dest_idx].vport.pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
	}
}

static int
esw_setup_vport_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
		      struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
		      int i)
{
	int j;

	for (j = esw_attr->split_count; j < esw_attr->out_count; j++, i++)
		esw_setup_vport_dest(dest, flow_act, esw, esw_attr, j, i, true);
	return i;
}

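/* Fill the dest[] array for a rule from its flow attributes. Exactly one
 * of the branches below applies per rule: a fixed destination table, the
 * slow path table, a goto-chain table, an indirect table, a chain-based
 * source port rewrite, or plain vport destinations.
 */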
static int
esw_setup_dests(struct mlx5_flow_destination *dest,
		struct mlx5_flow_act *flow_act,
		struct mlx5_eswitch *esw,
		struct mlx5_flow_attr *attr,
		struct mlx5_flow_spec *spec,
		int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	int err = 0;

	if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) &&
	    MLX5_CAP_GEN(esw_attr->in_mdev, reg_c_preserve) &&
	    mlx5_eswitch_vport_match_metadata_enabled(esw) &&
	    MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level))
		attr->flags |= MLX5_ESW_ATTR_FLAG_SRC_REWRITE;

	if (attr->dest_ft) {
		esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
		(*i)++;
	} else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
		esw_setup_slow_path_dest(dest, flow_act, chains, *i);
		(*i)++;
	} else if (attr->dest_chain) {
		err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,
					   1, 0, *i);
		(*i)++;
	} else if (esw_is_indir_table(esw, attr)) {
		err = esw_setup_indir_table(dest, flow_act, esw, attr, spec, true, i);
	} else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) {
		err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i);
	} else {
		*i = esw_setup_vport_dests(dest, flow_act, esw, esw_attr, *i);
	}

	return err;
}

static void
esw_cleanup_dests(struct mlx5_eswitch *esw,
		  struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);

	if (attr->dest_ft) {
		esw_cleanup_decap_indir(esw, attr);
	} else if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) {
		if (attr->dest_chain)
			esw_cleanup_chain_dest(chains, attr->dest_chain, 1, 0);
		else if (esw_is_indir_table(esw, attr))
			esw_cleanup_indir_table(esw, attr);
		else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
			esw_cleanup_chain_src_port_rewrite(esw, attr);
	}
}

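/* Add an offloaded rule to the FDB. Split rules (split_count > 0) are
 * inserted into the per-vport table; all others go to the chain/prio
 * table, or to the table supplied in attr->ft. A termination table is
 * used instead when mlx5_eswitch_termtbl_required() indicates so.
 */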
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	bool split = !!(esw_attr->split_count);
	struct mlx5_vport_tbl_attr fwd_attr;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *fdb;
	int i = 0;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	flow_act.action = attr->action;
	/* if per flow vlan pop/push is emulated, don't set that into the firmware */
	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
				     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(esw_attr->vlan_proto[0]);
		flow_act.vlan[0].vid = esw_attr->vlan_vid[0];
		flow_act.vlan[0].prio = esw_attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(esw_attr->vlan_proto[1]);
			flow_act.vlan[1].vid = esw_attr->vlan_vid[1];
			flow_act.vlan[1].prio = esw_attr->vlan_prio[1];
		}
	}

	mlx5_eswitch_set_rule_flow_source(esw, spec, esw_attr);

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int err;

		err = esw_setup_dests(dest, &flow_act, esw, attr, spec, &i);
		if (err) {
			rule = ERR_PTR(err);
			goto err_create_goto_table;
		}
	}

	if (esw_attr->decap_pkt_reformat)
		flow_act.pkt_reformat = esw_attr->decap_pkt_reformat;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter_id = mlx5_fc_id(attr->counter);
		i++;
	}

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	if (attr->inner_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

	if (split) {
		fwd_attr.chain = attr->chain;
		fwd_attr.prio = attr->prio;
		fwd_attr.vport = esw_attr->in_rep->vport;

		fdb = esw_vport_tbl_get(esw, &fwd_attr);
	} else {
		if (attr->chain || attr->prio)
			fdb = mlx5_chains_get_table(chains, attr->chain,
						    attr->prio, 0);
		else
			fdb = attr->ft;

		if (!(attr->flags & MLX5_ESW_ATTR_FLAG_NO_IN_PORT))
			mlx5_eswitch_set_rule_source_port(esw, spec, attr,
							  esw_attr->in_mdev->priv.eswitch,
							  esw_attr->in_rep->vport);
	}
	if (IS_ERR(fdb)) {
		rule = ERR_CAST(fdb);
		goto err_esw_get;
	}

	if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
		rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, esw_attr,
						     &flow_act, dest, i);
	else
		rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		atomic64_inc(&esw->offloads.num_flows);

	return rule;

err_add_rule:
	if (split)
		esw_vport_tbl_put(esw, &fwd_attr);
	else if (attr->chain || attr->prio)
		mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_esw_get:
	esw_cleanup_dests(esw, attr);
err_create_goto_table:
	return rule;
}

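/* Add the fast-path half of a split rule: match in the chain/prio table,
 * apply the pre-split destinations, and forward to the per-vport table
 * where the remaining actions are executed.
 */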
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	struct mlx5_vport_tbl_attr fwd_attr;
	struct mlx5_flow_table *fast_fdb;
	struct mlx5_flow_table *fwd_fdb;
	struct mlx5_flow_handle *rule;
	int i, err = 0;

	fast_fdb = mlx5_chains_get_table(chains, attr->chain, attr->prio, 0);
	if (IS_ERR(fast_fdb)) {
		rule = ERR_CAST(fast_fdb);
		goto err_get_fast;
	}

	fwd_attr.chain = attr->chain;
	fwd_attr.prio = attr->prio;
	fwd_attr.vport = esw_attr->in_rep->vport;
	fwd_fdb = esw_vport_tbl_get(esw, &fwd_attr);
	if (IS_ERR(fwd_fdb)) {
		rule = ERR_CAST(fwd_fdb);
		goto err_get_fwd;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < esw_attr->split_count; i++) {
		if (esw_is_indir_table(esw, attr))
			err = esw_setup_indir_table(dest, &flow_act, esw, attr, spec, false, &i);
		else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
			err = esw_setup_chain_src_port_rewrite(dest, &flow_act, esw, chains, attr,
							       &i);
		else
			esw_setup_vport_dest(dest, &flow_act, esw, esw_attr, i, i, false);

		if (err) {
			rule = ERR_PTR(err);
			goto err_chain_src_rewrite;
		}
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = fwd_fdb;
	i++;

	mlx5_eswitch_set_rule_source_port(esw, spec, attr,
					  esw_attr->in_mdev->priv.eswitch,
					  esw_attr->in_rep->vport);

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);

	if (IS_ERR(rule)) {
		i = esw_attr->split_count;
		goto err_chain_src_rewrite;
	}

	atomic64_inc(&esw->offloads.num_flows);

	return rule;
err_chain_src_rewrite:
	esw_put_dest_tables_loop(esw, attr, 0, i);
	esw_vport_tbl_put(esw, &fwd_attr);
err_get_fwd:
	mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_get_fast:
	return rule;
}

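/* Common teardown for offloaded and fwd rules: delete the rule, release
 * any termination tables it holds, and drop the references taken on the
 * per-vport and chain tables when the rule was added.
 */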
static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
			struct mlx5_flow_handle *rule,
			struct mlx5_flow_attr *attr,
			bool fwd_rule)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	bool split = (esw_attr->split_count > 0);
	struct mlx5_vport_tbl_attr fwd_attr;
	int i;

	mlx5_del_flow_rules(rule);

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) {
		/* unref the term table */
		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
			if (esw_attr->dests[i].termtbl)
				mlx5_eswitch_termtbl_put(esw, esw_attr->dests[i].termtbl);
		}
	}

	atomic64_dec(&esw->offloads.num_flows);

	if (fwd_rule || split) {
		fwd_attr.chain = attr->chain;
		fwd_attr.prio = attr->prio;
		fwd_attr.vport = esw_attr->in_rep->vport;
	}

	if (fwd_rule) {
		esw_vport_tbl_put(esw, &fwd_attr);
		mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
		esw_put_dest_tables_loop(esw, attr, 0, esw_attr->split_count);
	} else {
		if (split)
			esw_vport_tbl_put(esw, &fwd_attr);
		else if (attr->chain || attr->prio)
			mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
		esw_cleanup_dests(esw, attr);
	}
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, false);
}

void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, true);
}

static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int i, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	mlx5_esw_for_each_host_func_rep(esw, i, rep, esw->esw_funcs.num_vfs) {
		if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push && in_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules w.o it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}

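/* On HW that cannot pop/push vlan per flow, the actions are emulated with
 * a global vlan pop policy and per-vport vlan push configuration,
 * refcounted below so the settings are applied with the first rule and
 * removed with the last.
 */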
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
		   !attr->dest_chain);

	mutex_lock(&esw->state_lock);

	err = esw_add_vlan_action_check(esw_attr, push, pop, fwd);
	if (err)
		goto unlock;

	attr->flags &= ~MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;

	vport = esw_vlan_action_get_vport(esw_attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
			vport->vlan_refcount++;
			attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
		}

		goto unlock;
	}

	if (!push && !pop)
		goto unlock;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, esw_attr->vlan_vid[0],
						    0, SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = esw_attr->vlan_vid[0];
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_VLAN_HANDLED))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	mutex_lock(&esw->state_lock);

	vport = esw_vlan_action_get_vport(esw_attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
			vport->vlan_refcount--;

		goto out;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		goto out;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	mutex_unlock(&esw->state_lock);
	return err;
}

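/* Add a slow path rule that catches packets sent by the eswitch manager
 * on a given SQ (representor transmit traffic) and forwards them to the
 * corresponding vport.
 */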
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, u16 vport,
				    u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	/* source vport is the esw manager */
	MLX5_SET(fte_match_set_misc, misc, source_port, esw->manager_vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

static void mlx5_eswitch_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows = esw->fdb_table.offloads.send_to_vport_meta_rules;
	int i = 0, num_vfs = esw->esw_funcs.num_vfs, vport_num;

	if (!num_vfs || !flows)
		return;

	mlx5_esw_for_each_vf_vport_num(esw, vport_num, num_vfs)
		mlx5_del_flow_rules(flows[i++]);

	kvfree(flows);
}

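/* Per-VF rules that forward packets carrying the slow table goto-vport
 * mark in metadata_reg_c_1 to the vport encoded in metadata_reg_c_0.
 */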
static int
mlx5_eswitch_add_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
{
	int num_vfs, vport_num, rule_idx = 0, err = 0;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_handle **flows;
	struct mlx5_flow_spec *spec;

	num_vfs = esw->esw_funcs.num_vfs;
	flows = kvzalloc(num_vfs * sizeof(*flows), GFP_KERNEL);
	if (!flows)
		return -ENOMEM;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto alloc_err;
	}

	MLX5_SET(fte_match_param, spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_1,
		 ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	mlx5_esw_for_each_vf_vport_num(esw, vport_num, num_vfs) {
		MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
		dest.vport.num = vport_num;

		flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
						spec, &flow_act, &dest, 1);
		if (IS_ERR(flow_rule)) {
			err = PTR_ERR(flow_rule);
			esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule idx %d, err %ld\n",
				 rule_idx, PTR_ERR(flow_rule));
			goto rule_err;
		}
		flows[rule_idx++] = flow_rule;
	}

	esw->fdb_table.offloads.send_to_vport_meta_rules = flows;
	kvfree(spec);
	return 0;

rule_err:
	while (--rule_idx >= 0)
		mlx5_del_flow_rules(flows[rule_idx]);
	kvfree(spec);
alloc_err:
	kvfree(flows);
	return err;
}

static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
{
	return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	       MLX5_FDB_TO_VPORT_REG_C_1;
}

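/* Enable or disable passing of reg_c_0 (and reg_c_1 when loopback is
 * supported) from the FDB to the vport, so that metadata written in the
 * FDB is still available when packets are delivered to the vport.
 */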
static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	u32 min[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
	u8 curr, wanted;
	int err;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw) &&
	    !mlx5_eswitch_vport_match_metadata_enabled(esw))
		return 0;

	MLX5_SET(query_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
	err = mlx5_cmd_exec_inout(esw->dev, query_esw_vport_context, in, out);
	if (err)
		return err;

	curr = MLX5_GET(query_esw_vport_context_out, out,
			esw_vport_context.fdb_to_vport_reg_c_id);
	wanted = MLX5_FDB_TO_VPORT_REG_C_0;
	if (mlx5_eswitch_reg_c1_loopback_supported(esw))
		wanted |= MLX5_FDB_TO_VPORT_REG_C_1;

	if (enable)
		curr |= wanted;
	else
		curr &= ~wanted;

	MLX5_SET(modify_esw_vport_context_in, min,
		 esw_vport_context.fdb_to_vport_reg_c_id, curr);
	MLX5_SET(modify_esw_vport_context_in, min,
		 field_select.fdb_to_vport_reg_c_id, 1);

	err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, min);
	if (!err) {
		if (enable && (curr & MLX5_FDB_TO_VPORT_REG_C_1))
			esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
		else
			esw->flags &= ~MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
	}

	return err;
}

static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
				  struct mlx5_core_dev *peer_dev,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_destination *dest)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

		MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(peer_dev, vhca_id));

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);
	}

	dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest->vport.num = peer_dev->priv.eswitch->manager_vport;
	dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
	dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}

static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
					       struct mlx5_eswitch *peer_esw,
					       struct mlx5_flow_spec *spec,
					       u16 vport)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(peer_esw,
								   vport));
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);
	}
}

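/* For merged-eswitch (dual-port) setups: add one rule per peer function
 * (PF, ECPF and each VF) that forwards packets arriving from the peer
 * eswitch to the peer's manager vport.
 */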
static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
				       struct mlx5_core_dev *peer_dev)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle **flows;
	struct mlx5_flow_handle *flow;
	struct mlx5_flow_spec *spec;
	/* total vports is the same for both e-switches */
	int nvports = esw->total_vports;
	void *misc;
	int err, i;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	peer_miss_rules_setup(esw, peer_dev, spec, &dest);

	flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
	if (!flows) {
		err = -ENOMEM;
		goto alloc_flows_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
						   spec, MLX5_VPORT_PF);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_pf_flow_err;
		}
		flows[MLX5_VPORT_PF] = flow;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_ecpf_flow_err;
		}
		flows[mlx5_eswitch_ecpf_idx(esw)] = flow;
	}

	mlx5_esw_for_each_vf_vport_num(esw, i, mlx5_core_max_vfs(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw,
						   peer_dev->priv.eswitch,
						   spec, i);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_vf_flow_err;
		}
		flows[i] = flow;
	}

	esw->fdb_table.offloads.peer_miss_rules = flows;

	kvfree(spec);
	return 0;

add_vf_flow_err:
	nvports = --i;
	mlx5_esw_for_each_vf_vport_num_reverse(esw, i, nvports)
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
add_ecpf_flow_err:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);
add_pf_flow_err:
	esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
	kvfree(flows);
alloc_flows_err:
	kvfree(spec);
	return err;
}

static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows;
	int i;

	flows = esw->fdb_table.offloads.peer_miss_rules;

	mlx5_esw_for_each_vf_vport_num_reverse(esw, i,
					       mlx5_core_max_vfs(esw->dev))
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);

	kvfree(flows);
}

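/* Install the two match-all miss flows (see MLX5_ESW_MISS_FLOWS): one for
 * unicast and one for multicast destination MACs, both forwarding to the
 * eswitch manager vport.
 */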
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = esw->manager_vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}

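/* Add a rule to the restore table that matches a given chain tag in
 * metadata_reg_c_0, sets it as the flow tag, applies the restore copy
 * modify header and forwards to the offloads table.
 */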
struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *ft = esw->offloads.ft_offloads_restore;
	struct mlx5_flow_context *flow_context;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_destination dest;
	struct mlx5_flow_spec *spec;
	void *misc;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return ERR_PTR(-EOPNOTSUPP);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
		 ESW_CHAIN_TAG_METADATA_MASK);
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag);
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	flow_act.modify_hdr = esw->offloads.restore_copy_hdr_id;

	flow_context = &spec->flow_context;
	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = tag;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = esw->offloads.ft_offloads;

	flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	kvfree(spec);

	if (IS_ERR(flow_rule))
		esw_warn(esw->dev,
			 "Failed to create restore rule for tag: %d, err(%d)\n",
			 tag, (int)PTR_ERR(flow_rule));

	return flow_rule;
}

u32
esw_get_max_restore_tag(struct mlx5_eswitch *esw)
{
	return ESW_CHAIN_TAG_METADATA_MASK;
}

1967ce6e 1309#define MAX_PF_SQ 256
cd3d07e7 1310#define MAX_SQ_NVPORTS 32
1967ce6e 1311
static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
					   u32 *flow_group_in)
{
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    flow_group_in,
					    match_criteria);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS_2);

		MLX5_SET(fte_match_param, match_criteria,
			 misc_parameters_2.metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());
	} else {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_port);
	}
}

#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
static void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw)
{
	struct mlx5_vport_tbl_attr attr;
	struct mlx5_vport *vport;
	int i;

	attr.chain = 0;
	attr.prio = 1;
	mlx5_esw_for_all_vports(esw, i, vport) {
		attr.vport = vport->vport;
		esw_vport_tbl_put(esw, &attr);
	}
}

static int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw)
{
	struct mlx5_vport_tbl_attr attr;
	struct mlx5_flow_table *fdb;
	struct mlx5_vport *vport;
	int i;

	attr.chain = 0;
	attr.prio = 1;
	mlx5_esw_for_all_vports(esw, i, vport) {
		attr.vport = vport->vport;
		fdb = esw_vport_tbl_get(esw, &attr);
		if (IS_ERR(fdb))
			goto out;
	}
	return 0;

out:
	mlx5_esw_vport_tbl_put(esw);
	return PTR_ERR(fdb);
}

#define fdb_modify_header_fwd_to_table_supported(esw) \
	(MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table))
static void esw_init_chains_offload_flags(struct mlx5_eswitch *esw, u32 *flags)
{
	struct mlx5_core_dev *dev = esw->dev;

	if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ignore_flow_level))
		*flags |= MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;

	if (!MLX5_CAP_ESW_FLOWTABLE(dev, multi_fdb_encap) &&
	    esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n");
	} else if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Tc chains and priorities offload aren't supported\n");
	} else if (!fdb_modify_header_fwd_to_table_supported(esw)) {
		/* Disabled when ttl workaround is needed, e.g
		 * when ESWITCH_IPV4_TTL_MODIFY_ENABLE = true in mlxconfig
		 */
		esw_warn(dev,
			 "Tc chains and priorities offload aren't supported, check firmware version, or mlxconfig settings\n");
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
	} else {
		*flags |= MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_info(dev, "Supported tc chains and prios offload\n");
	}

	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		*flags |= MLX5_CHAINS_FT_TUNNEL_SUPPORTED;
}

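/* Create the tc chains infrastructure on top of the slow path FDB: the
 * always-present tc end table, the level 0 fast path root and, when
 * chains and priorities are not supported, the per-vport tables used for
 * split rules.
 */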
static int
esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *nf_ft, *ft;
	struct mlx5_chains_attr attr = {};
	struct mlx5_fs_chains *chains;
	u32 fdb_max;
	int err;

	fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);

	esw_init_chains_offload_flags(esw, &attr.flags);
	attr.ns = MLX5_FLOW_NAMESPACE_FDB;
	attr.max_ft_sz = fdb_max;
	attr.max_grp_num = esw->params.large_group_num;
	attr.default_ft = miss_fdb;
	attr.max_restore_tag = esw_get_max_restore_tag(esw);

	chains = mlx5_chains_create(dev, &attr);
	if (IS_ERR(chains)) {
		err = PTR_ERR(chains);
		esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
		return err;
	}

	esw->fdb_table.offloads.esw_chains_priv = chains;

	/* Create tc_end_ft which is the always created ft chain */
	nf_ft = mlx5_chains_get_table(chains, mlx5_chains_get_nf_ft_chain(chains),
				      1, 0);
	if (IS_ERR(nf_ft)) {
		err = PTR_ERR(nf_ft);
		goto nf_ft_err;
	}

	/* Always open the root for fast path */
	ft = mlx5_chains_get_table(chains, 0, 1, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto level_0_err;
	}

	/* Open level 1 for split fdb rules now if prios isn't supported */
	if (!mlx5_chains_prios_supported(chains)) {
		err = mlx5_esw_vport_tbl_get(esw);
		if (err)
			goto level_1_err;
	}

	mlx5_chains_set_end_ft(chains, nf_ft);

	return 0;

level_1_err:
	mlx5_chains_put_table(chains, 0, 1, 0);
level_0_err:
	mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
nf_ft_err:
	mlx5_chains_destroy(chains);
	esw->fdb_table.offloads.esw_chains_priv = NULL;

	return err;
}

static void
esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
{
	if (!mlx5_chains_prios_supported(chains))
		mlx5_esw_vport_tbl_put(esw);
	mlx5_chains_put_table(chains, 0, 1, 0);
	mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
	mlx5_chains_destroy(chains);
}

#else /* CONFIG_MLX5_CLS_ACT */

static int
esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
{ return 0; }

static void
esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
{}

#endif

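/* Create the slow path FDB and its flow groups: send-to-vport (SQN
 * based), send-to-vport metadata (per VF), peer eswitch miss (merged
 * eswitch only) and the match-all miss group, then install the miss
 * rules.
 */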
0da3c12d 1493static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
1967ce6e
OG
1494{
1495 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1496 struct mlx5_flow_table_attr ft_attr = {};
8e404fef 1497 int num_vfs, table_size, ix, err = 0;
1967ce6e
OG
1498 struct mlx5_core_dev *dev = esw->dev;
1499 struct mlx5_flow_namespace *root_ns;
1500 struct mlx5_flow_table *fdb = NULL;
39ac237c 1501 u32 flags = 0, *flow_group_in;
1967ce6e
OG
1502 struct mlx5_flow_group *g;
1503 void *match_criteria;
f80be543 1504 u8 *dmac;
1967ce6e
OG
1505
1506 esw_debug(esw->dev, "Create offloads FDB Tables\n");
39ac237c 1507
1b9a07ee 1508 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
1967ce6e
OG
1509 if (!flow_group_in)
1510 return -ENOMEM;
1511
1512 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
1513 if (!root_ns) {
1514 esw_warn(dev, "Failed to get FDB flow namespace\n");
1515 err = -EOPNOTSUPP;
1516 goto ns_err;
1517 }
8463daf1
MG
1518 esw->fdb_table.offloads.ns = root_ns;
1519 err = mlx5_flow_namespace_set_mode(root_ns,
1520 esw->dev->priv.steering->mode);
1521 if (err) {
1522 esw_warn(dev, "Failed to set FDB namespace steering mode\n");
1523 goto ns_err;
1524 }
1967ce6e 1525
0da3c12d 1526 table_size = esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ +
8e404fef 1527 MLX5_ESW_MISS_FLOWS + esw->total_vports + esw->esw_funcs.num_vfs;
b3ba5149 1528
e52c2802
PB
1529 /* create the slow path fdb with encap set, so further table instances
1530 * can be created at run time while VFs are probed if the FW allows that.
1531 */
	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	ft_attr.flags = flags;
	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

	err = esw_chains_create(esw, fdb);
	if (err) {
		esw_warn(dev, "Failed to open fdb chains err(%d)\n", err);
		goto fdb_chains_err;
	}

	/* create send-to-vport group */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* meta send to vport */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS_2);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET(fte_match_param, match_criteria,
		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, match_criteria,
		 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);

	num_vfs = esw->esw_funcs.num_vfs;
	if (num_vfs) {
		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + num_vfs - 1);
		ix += num_vfs;

		g = mlx5_create_flow_group(fdb, flow_group_in);
		if (IS_ERR(g)) {
			err = PTR_ERR(g);
			esw_warn(dev, "Failed to create send-to-vport meta flow group err(%d)\n",
				 err);
			goto send_vport_meta_err;
		}
		esw->fdb_table.offloads.send_to_vport_meta_grp = g;

		err = mlx5_eswitch_add_send_to_vport_meta_rules(esw);
		if (err)
			goto meta_rule_err;
	}

	if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
		/* create peer esw miss group */
		memset(flow_group_in, 0, inlen);

		esw_set_flow_group_source_port(esw, flow_group_in);

		if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
			match_criteria = MLX5_ADDR_OF(create_flow_group_in,
						      flow_group_in,
						      match_criteria);

			MLX5_SET_TO_ONES(fte_match_param, match_criteria,
					 misc_parameters.source_eswitch_owner_vhca_id);

			MLX5_SET(create_flow_group_in, flow_group_in,
				 source_eswitch_owner_vhca_id_valid, 1);
		}

		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
			 ix + esw->total_vports - 1);
		ix += esw->total_vports;

		g = mlx5_create_flow_group(fdb, flow_group_in);
		if (IS_ERR(g)) {
			err = PTR_ERR(g);
			esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
			goto peer_miss_err;
		}
		esw->fdb_table.offloads.peer_miss_grp = g;
	}

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + MLX5_ESW_MISS_FLOWS);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	kvfree(flow_group_in);
	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
	mlx5_eswitch_del_send_to_vport_meta_rules(esw);
meta_rule_err:
	if (esw->fdb_table.offloads.send_to_vport_meta_grp)
		mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
send_vport_meta_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	esw_chains_destroy(esw, esw_chains(esw));
fdb_chains_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS);
ns_err:
	kvfree(flow_group_in);
	return err;
}
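
/*
 * Slow-path FDB layout created above, in flow-index order:
 *   [0, ix)                  send-to-vport rules matched on SQN + source
 *                            port, ix = total_vports * MAX_SQ_NVPORTS +
 *                            MAX_PF_SQ;
 *   [ix, ix + num_vfs)       metadata-based send-to-vport rules
 *                            (reg_c_0 vport metadata, reg_c_1 tunnel bits);
 *   next total_vports slots  peer e-switch miss group (merged_eswitch only);
 *   final group              MLX5_ESW_MISS_FLOWS + 1 slots for the unicast
 *                            and multicast miss rules.
 * The error labels unwind in exact reverse order of creation.
 */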

static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.offloads.slow_fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
	mlx5_eswitch_del_send_to_vport_meta_rules(esw);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	if (esw->fdb_table.offloads.send_to_vport_meta_grp)
		mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	esw_chains_destroy(esw, esw_chains(esw));

	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
				     MLX5_FLOW_STEERING_MODE_DMFS);
	atomic64_set(&esw->user_count, 0);
}

static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = esw->total_vports + MLX5_ESW_MISS_FLOWS;
	ft_attr.prio = 1;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}
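
/*
 * The NIC RX offloads table holds one vport-RX steering rule per vport
 * plus the MLX5_ESW_MISS_FLOWS entries, hence the max_fte sizing above;
 * the vport RX group created below spans the same range.
 */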

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}

static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int nvports;
	int err = 0;

	nvports = esw->total_vports + MLX5_ESW_MISS_FLOWS;
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	esw_set_flow_group_source_port(esw, flow_group_in);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(esw, vport));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}
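
/*
 * Illustrative caller sketch (not part of this file): a representor
 * driver steers traffic belonging to its vport into one of its own TIRs
 * by passing a TIR destination. "tirn" below is a hypothetical TIR
 * number obtained elsewhere; the real caller in the mlx5e rep code
 * looks very similar.
 *
 *	struct mlx5_flow_destination dest = {};
 *	struct mlx5_flow_handle *rule;
 *
 *	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
 *	dest.tir_num = tirn;
 *	rule = mlx5_eswitch_create_vport_rx_rule(esw, rep->vport, &dest);
 *	if (IS_ERR(rule))
 *		return PTR_ERR(rule);
 */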

static int mlx5_eswitch_inline_mode_get(const struct mlx5_eswitch *esw, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	int vport;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == MLX5_ESWITCH_NONE)
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}

query_vports:
	mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
	mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
		if (prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}
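
/*
 * Reading the inline mode back is only well defined when every host
 * function vport agrees: NOT_REQUIRED and L2 come straight from the
 * device capability, while VPORT_CONTEXT requires querying each vport's
 * min-inline and fails with -EINVAL on any mismatch.
 */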

static void esw_destroy_restore_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return;

	mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id);
	mlx5_destroy_flow_group(offloads->restore_group);
	mlx5_destroy_flow_table(offloads->ft_offloads_restore);
}

static int esw_create_restore_table(struct mlx5_eswitch *esw)
{
	u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *ns;
	struct mlx5_modify_hdr *mod_hdr;
	void *match_criteria, *misc;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int err = 0;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in) {
		err = -ENOMEM;
		goto out_free;
	}

	ft_attr.max_fte = 1 << ESW_CHAIN_TAG_METADATA_BITS;
	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		esw_warn(esw->dev, "Failed to create restore table, err %d\n",
			 err);
		goto out_free;
	}

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    misc_parameters_2);

	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
		 ESW_CHAIN_TAG_METADATA_MASK);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ft_attr.max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS_2);
	g = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create restore flow group, err: %d\n",
			 err);
		goto err_group;
	}

	MLX5_SET(copy_action_in, modact, action_type, MLX5_ACTION_TYPE_COPY);
	MLX5_SET(copy_action_in, modact, src_field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
	MLX5_SET(copy_action_in, modact, dst_field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
	mod_hdr = mlx5_modify_header_alloc(esw->dev,
					   MLX5_FLOW_NAMESPACE_KERNEL, 1,
					   modact);
	if (IS_ERR(mod_hdr)) {
		err = PTR_ERR(mod_hdr);
		esw_warn(dev, "Failed to create restore mod header, err: %d\n",
			 err);
		goto err_mod_hdr;
	}

	esw->offloads.ft_offloads_restore = ft;
	esw->offloads.restore_group = g;
	esw->offloads.restore_copy_hdr_id = mod_hdr;

	kvfree(flow_group_in);

	return 0;

err_mod_hdr:
	mlx5_destroy_flow_group(g);
err_group:
	mlx5_destroy_flow_table(ft);
out_free:
	kvfree(flow_group_in);

	return err;
}
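
/*
 * The restore table matches the chain tag that FDB rules write into the
 * ESW_CHAIN_TAG_METADATA_MASK bits of reg_c_0, and the copy action
 * allocated above moves reg_c_1 into reg_b so the value survives into
 * the NIC RX domain; the TC offload code uses this to resume software
 * processing of partially offloaded (multi-chain) flows.
 */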

static int esw_offloads_start(struct mlx5_eswitch *esw,
			      struct netlink_ext_ack *extack)
{
	int err, err1;

	mlx5_eswitch_disable_locked(esw, false);
	err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
					 esw->dev->priv.sriov.num_vfs);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed setting eswitch to offloads");
		err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
						  MLX5_ESWITCH_IGNORE_NUM_VFS);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to legacy");
		}
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			NL_SET_ERR_MSG_MOD(extack,
					   "Inline mode is different between vports");
		}
	}
	return err;
}

void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	kfree(esw->offloads.vport_reps);
}

int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	int total_vports = esw->total_vports;
	struct mlx5_eswitch_rep *rep;
	int vport_index;
	u8 rep_type;

	esw->offloads.vport_reps = kcalloc(total_vports,
					   sizeof(struct mlx5_eswitch_rep),
					   GFP_KERNEL);
	if (!esw->offloads.vport_reps)
		return -ENOMEM;

	mlx5_esw_for_all_reps(esw, vport_index, rep) {
		rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport_index);
		rep->vport_index = vport_index;

		for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
			atomic_set(&rep->rep_data[rep_type].state,
				   REP_UNREGISTERED);
	}

	return 0;
}

static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
				      struct mlx5_eswitch_rep *rep, u8 rep_type)
{
	if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
			   REP_LOADED, REP_REGISTERED) == REP_LOADED)
		esw->offloads.rep_ops[rep_type]->unload(rep);
}

static void __unload_reps_sf_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int i;

	mlx5_esw_for_each_sf_rep(esw, i, rep)
		__esw_offloads_unload_rep(esw, rep, rep_type);
}

static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int i;

	__unload_reps_sf_vport(esw, rep_type);

	mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, esw->esw_funcs.num_vfs)
		__esw_offloads_unload_rep(esw, rep, rep_type);

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	__esw_offloads_unload_rep(esw, rep, rep_type);
}

int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_eswitch_rep *rep;
	int rep_type;
	int err;

	rep = mlx5_eswitch_get_rep(esw, vport_num);
	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
		if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
				   REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
			err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
			if (err)
				goto err_reps;
		}

	return 0;

err_reps:
	atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED);
	for (--rep_type; rep_type >= 0; rep_type--)
		__esw_offloads_unload_rep(esw, rep, rep_type);
	return err;
}

void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_eswitch_rep *rep;
	int rep_type;

	rep = mlx5_eswitch_get_rep(esw, vport_num);
	for (rep_type = NUM_REP_TYPES - 1; rep_type >= 0; rep_type--)
		__esw_offloads_unload_rep(esw, rep, rep_type);
}

int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
{
	int err;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return 0;

	if (vport_num != MLX5_VPORT_UPLINK) {
		err = mlx5_esw_offloads_devlink_port_register(esw, vport_num);
		if (err)
			return err;
	}

	err = mlx5_esw_offloads_rep_load(esw, vport_num);
	if (err)
		goto load_err;
	return err;

load_err:
	if (vport_num != MLX5_VPORT_UPLINK)
		mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
	return err;
}

void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
{
	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return;

	mlx5_esw_offloads_rep_unload(esw, vport_num);

	if (vport_num != MLX5_VPORT_UPLINK)
		mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
}

#define ESW_OFFLOADS_DEVCOM_PAIR	(0)
#define ESW_OFFLOADS_DEVCOM_UNPAIR	(1)

static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
				  struct mlx5_eswitch *peer_esw)
{
	int err;

	err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
	if (err)
		return err;

	return 0;
}

static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
{
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
	mlx5e_tc_clean_fdb_peer_flows(esw);
#endif
	esw_del_fdb_peer_miss_rules(esw);
}

static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
					 struct mlx5_eswitch *peer_esw,
					 bool pair)
{
	struct mlx5_flow_root_namespace *peer_ns;
	struct mlx5_flow_root_namespace *ns;
	int err;

	peer_ns = peer_esw->dev->priv.steering->fdb_root_ns;
	ns = esw->dev->priv.steering->fdb_root_ns;

	if (pair) {
		err = mlx5_flow_namespace_set_peer(ns, peer_ns);
		if (err)
			return err;

		err = mlx5_flow_namespace_set_peer(peer_ns, ns);
		if (err) {
			mlx5_flow_namespace_set_peer(ns, NULL);
			return err;
		}
	} else {
		mlx5_flow_namespace_set_peer(ns, NULL);
		mlx5_flow_namespace_set_peer(peer_ns, NULL);
	}

	return 0;
}

static int mlx5_esw_offloads_devcom_event(int event,
					  void *my_data,
					  void *event_data)
{
	struct mlx5_eswitch *esw = my_data;
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
	struct mlx5_eswitch *peer_esw = event_data;
	int err;

	switch (event) {
	case ESW_OFFLOADS_DEVCOM_PAIR:
		if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
		    mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
			break;

		err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
		if (err)
			goto err_out;
		err = mlx5_esw_offloads_pair(esw, peer_esw);
		if (err)
			goto err_peer;

		err = mlx5_esw_offloads_pair(peer_esw, esw);
		if (err)
			goto err_pair;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
		break;

	case ESW_OFFLOADS_DEVCOM_UNPAIR:
		if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
			break;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
		mlx5_esw_offloads_unpair(peer_esw);
		mlx5_esw_offloads_unpair(esw);
		mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
		break;
	}

	return 0;

err_pair:
	mlx5_esw_offloads_unpair(esw);
err_peer:
	mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
err_out:
	mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
		      event, err);
	return err;
}
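
/*
 * PAIR is handled symmetrically: the two FDB root namespaces are linked
 * first, then peer miss rules are installed in both directions before
 * the devcom component is marked paired; any failure unwinds the
 * partial state via the error labels above. UNPAIR reverses the same
 * steps.
 */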

static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	INIT_LIST_HEAD(&esw->offloads.peer_flows);
	mutex_init(&esw->offloads.peer_mutex);

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	mlx5_devcom_register_component(devcom,
				       MLX5_DEVCOM_ESW_OFFLOADS,
				       mlx5_esw_offloads_devcom_event,
				       esw);

	mlx5_devcom_send_event(devcom,
			       MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_PAIR, esw);
}

static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_UNPAIR, esw);

	mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}

static bool
esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
{
	if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
		return false;

	if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	      MLX5_FDB_TO_VPORT_REG_C_0))
		return false;

	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
		return false;

	if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
	    mlx5_ecpf_vport_exists(esw->dev))
		return false;

	return true;
}

u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
{
	u32 vport_end_ida = (1 << ESW_VPORT_BITS) - 1;
	u32 max_pf_num = (1 << ESW_PFNUM_BITS) - 1;
	u32 pf_num;
	int id;

	/* Only 4 bits of pf_num */
	pf_num = PCI_FUNC(esw->dev->pdev->devfn);
	if (pf_num > max_pf_num)
		return 0;

	/* Metadata is 4 bits of PFNUM and 12 bits of unique id */
	/* Use only non-zero vport_id (1-4095) for all PF's */
	id = ida_alloc_range(&esw->offloads.vport_metadata_ida, 1, vport_end_ida, GFP_KERNEL);
	if (id < 0)
		return 0;
	id = (pf_num << ESW_VPORT_BITS) | id;
	return id;
}
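
/*
 * Worked example (assuming ESW_PFNUM_BITS = 4 and ESW_VPORT_BITS = 12,
 * matching the comments above and the definitions in eswitch.h for this
 * driver generation): on PF 1 with ida id 5 the metadata is
 * (1 << 12) | 5 = 0x1005. For matching, the value is placed in the
 * upper bits of reg_c_0, i.e. mlx5_eswitch_get_vport_metadata_for_match()
 * returns 0x1005 << (32 - 16) = 0x10050000.
 */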

void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata)
{
	u32 vport_bit_mask = (1 << ESW_VPORT_BITS) - 1;

	/* Metadata contains only 12 bits of actual ida id */
	ida_free(&esw->offloads.vport_metadata_ida, metadata & vport_bit_mask);
}

static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw,
					     struct mlx5_vport *vport)
{
	vport->default_metadata = mlx5_esw_match_metadata_alloc(esw);
	vport->metadata = vport->default_metadata;
	return vport->metadata ? 0 : -ENOSPC;
}

static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw,
						struct mlx5_vport *vport)
{
	if (!vport->default_metadata)
		return;

	WARN_ON(vport->metadata != vport->default_metadata);
	mlx5_esw_match_metadata_free(esw, vport->default_metadata);
}

static void esw_offloads_metadata_uninit(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	int i;

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
		return;

	mlx5_esw_for_all_vports_reverse(esw, i, vport)
		esw_offloads_vport_metadata_cleanup(esw, vport);
}

static int esw_offloads_metadata_init(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	int err;
	int i;

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
		return 0;

	mlx5_esw_for_all_vports(esw, i, vport) {
		err = esw_offloads_vport_metadata_setup(esw, vport);
		if (err)
			goto metadata_err;
	}

	return 0;

metadata_err:
	esw_offloads_metadata_uninit(esw);
	return err;
}

int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport)
{
	int err;

	err = esw_acl_ingress_ofld_setup(esw, vport);
	if (err)
		return err;

	err = esw_acl_egress_ofld_setup(esw, vport);
	if (err)
		goto egress_err;

	return 0;

egress_err:
	esw_acl_ingress_ofld_cleanup(esw, vport);
	return err;
}

void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport)
{
	esw_acl_egress_ofld_cleanup(vport);
	esw_acl_ingress_ofld_cleanup(esw, vport);
}

static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
	if (IS_ERR(vport))
		return PTR_ERR(vport);

	return esw_vport_create_offloads_acl_tables(esw, vport);
}

static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
	if (IS_ERR(vport))
		return;

	esw_vport_destroy_offloads_acl_tables(esw, vport);
}

static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_indir_table *indir;
	int err;

	memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
	mutex_init(&esw->fdb_table.offloads.vports.lock);
	hash_init(esw->fdb_table.offloads.vports.table);
	atomic64_set(&esw->user_count, 0);

	indir = mlx5_esw_indir_table_init();
	if (IS_ERR(indir)) {
		err = PTR_ERR(indir);
		goto create_indir_err;
	}
	esw->fdb_table.offloads.indir = indir;

	err = esw_create_uplink_offloads_acl_tables(esw);
	if (err)
		goto create_acl_err;

	err = esw_create_offloads_table(esw);
	if (err)
		goto create_offloads_err;

	err = esw_create_restore_table(esw);
	if (err)
		goto create_restore_err;

	err = esw_create_offloads_fdb_tables(esw);
	if (err)
		goto create_fdb_err;

	err = esw_create_vport_rx_group(esw);
	if (err)
		goto create_fg_err;

	return 0;

create_fg_err:
	esw_destroy_offloads_fdb_tables(esw);
create_fdb_err:
	esw_destroy_restore_table(esw);
create_restore_err:
	esw_destroy_offloads_table(esw);
create_offloads_err:
	esw_destroy_uplink_offloads_acl_tables(esw);
create_acl_err:
	mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
create_indir_err:
	mutex_destroy(&esw->fdb_table.offloads.vports.lock);
	return err;
}

static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
{
	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_fdb_tables(esw);
	esw_destroy_restore_table(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_uplink_offloads_acl_tables(esw);
	mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
	mutex_destroy(&esw->fdb_table.offloads.vports.lock);
}

static void
esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
{
	bool host_pf_disabled;
	u16 new_num_vfs;

	new_num_vfs = MLX5_GET(query_esw_functions_out, out,
			       host_params_context.host_num_of_vfs);
	host_pf_disabled = MLX5_GET(query_esw_functions_out, out,
				    host_params_context.host_pf_disabled);

	if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
		return;

	/* Number of VFs can only change from "0 to x" or "x to 0". */
	if (esw->esw_funcs.num_vfs > 0) {
		mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
	} else {
		int err;

		err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs,
						  MLX5_VPORT_UC_ADDR_CHANGE);
		if (err)
			return;
	}
	esw->esw_funcs.num_vfs = new_num_vfs;
}

static void esw_functions_changed_event_handler(struct work_struct *work)
{
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;
	const u32 *out;

	host_work = container_of(work, struct mlx5_host_work, work);
	esw = host_work->esw;

	out = mlx5_esw_query_functions(esw->dev);
	if (IS_ERR(out))
		goto out;

	esw_vfs_changed_event_handler(esw, out);
	kvfree(out);
out:
	kfree(host_work);
}

int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data)
{
	struct mlx5_esw_functions *esw_funcs;
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;

	host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
	if (!host_work)
		return NOTIFY_DONE;

	esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
	esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);

	host_work->esw = esw;

	INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
	queue_work(esw->work_queue, &host_work->work);

	return NOTIFY_OK;
}

static int mlx5_esw_host_number_init(struct mlx5_eswitch *esw)
{
	const u32 *query_host_out;

	if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
		return 0;

	query_host_out = mlx5_esw_query_functions(esw->dev);
	if (IS_ERR(query_host_out))
		return PTR_ERR(query_host_out);

	/* Mark non local controller with non zero controller number. */
	esw->offloads.host_number = MLX5_GET(query_esw_functions_out, query_host_out,
					     host_params_context.host_number);
	kvfree(query_host_out);
	return 0;
}

int esw_offloads_enable(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	int err, i;

	if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
	    MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
	else
		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;

	mutex_init(&esw->offloads.termtbl_mutex);
	mlx5_rdma_enable_roce(esw->dev);

	err = mlx5_esw_host_number_init(esw);
	if (err)
		goto err_metadata;

	if (esw_check_vport_match_metadata_supported(esw))
		esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;

	err = esw_offloads_metadata_init(esw);
	if (err)
		goto err_metadata;

	err = esw_set_passing_vport_metadata(esw, true);
	if (err)
		goto err_vport_metadata;

	err = esw_offloads_steering_init(esw);
	if (err)
		goto err_steering_init;

	/* Representor will control the vport link state */
	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;

	/* Uplink vport rep must load first. */
	err = esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK);
	if (err)
		goto err_uplink;

	err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
	if (err)
		goto err_vports;

	esw_offloads_devcom_init(esw);

	return 0;

err_vports:
	esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
err_uplink:
	esw_offloads_steering_cleanup(esw);
err_steering_init:
	esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
	esw_offloads_metadata_uninit(esw);
err_metadata:
	esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
	mlx5_rdma_disable_roce(esw->dev);
	mutex_destroy(&esw->offloads.termtbl_mutex);
	return err;
}

static int esw_offloads_stop(struct mlx5_eswitch *esw,
			     struct netlink_ext_ack *extack)
{
	int err, err1;

	mlx5_eswitch_disable_locked(esw, false);
	err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
					 MLX5_ESWITCH_IGNORE_NUM_VFS);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
		err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
						  MLX5_ESWITCH_IGNORE_NUM_VFS);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to offloads");
		}
	}

	return err;
}

void esw_offloads_disable(struct mlx5_eswitch *esw)
{
	esw_offloads_devcom_cleanup(esw);
	mlx5_eswitch_disable_pf_vf_vports(esw);
	esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
	esw_set_passing_vport_metadata(esw, false);
	esw_offloads_steering_cleanup(esw);
	esw_offloads_metadata_uninit(esw);
	esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
	mlx5_rdma_disable_roce(esw->dev);
	mutex_destroy(&esw->offloads.termtbl_mutex);
	esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}

static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = MLX5_ESWITCH_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = MLX5_ESWITCH_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case MLX5_ESWITCH_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case MLX5_ESWITCH_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
		*mlx5_mode = MLX5_INLINE_MODE_NONE;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
		*mlx5_mode = MLX5_INLINE_MODE_L2;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
		*mlx5_mode = MLX5_INLINE_MODE_IP;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int eswitch_devlink_esw_mode_check(const struct mlx5_eswitch *esw)
{
	/* devlink commands in NONE eswitch mode are currently supported only
	 * on ECPF.
	 */
	return (esw->mode == MLX5_ESWITCH_NONE &&
		!mlx5_core_is_ecpf_esw_manager(esw->dev)) ? -EOPNOTSUPP : 0;
}

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack)
{
	u16 cur_mlx5_mode, mlx5_mode = 0;
	struct mlx5_eswitch *esw;
	int err = 0;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	err = mlx5_esw_try_lock(esw);
	if (err < 0) {
		NL_SET_ERR_MSG_MOD(extack, "Can't change mode, E-Switch is busy");
		return err;
	}
	cur_mlx5_mode = err;
	err = 0;

	if (cur_mlx5_mode == mlx5_mode)
		goto unlock;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		err = esw_offloads_start(esw, extack);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		err = esw_offloads_stop(esw, extack);
	else
		err = -EINVAL;

unlock:
	mlx5_esw_unlock(esw);
	return err;
}
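
/*
 * Typical administration from userspace (iproute2 devlink; the PCI
 * address is an example):
 *
 *	devlink dev eswitch set pci/0000:03:00.0 mode switchdev
 *	devlink dev eswitch show pci/0000:03:00.0
 */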

int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_eswitch *esw;
	int err;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	down_write(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto unlock;

	err = esw_mode_to_devlink(esw->mode, mode);
unlock:
	up_write(&esw->mode_lock);
	return err;
}

int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	int err, vport, num_vport;
	struct mlx5_eswitch *esw;
	u8 mlx5_mode;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	down_write(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto out;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
			goto out;
		fallthrough;
	case MLX5_CAP_INLINE_MODE_L2:
		NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
		err = -EOPNOTSUPP;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		break;
	}

	if (atomic64_read(&esw->offloads.num_flows) > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set inline mode when flows are configured");
		err = -EOPNOTSUPP;
		goto out;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to set min inline on vport");
			goto revert_inline_mode;
		}
	}

	esw->offloads.inline_mode = mlx5_mode;
	up_write(&esw->mode_lock);
	return 0;

revert_inline_mode:
	num_vport = --vport;
	mlx5_esw_for_each_host_func_vport_reverse(esw, vport, num_vport)
		mlx5_modify_nic_vport_min_inline(dev,
						 vport,
						 esw->offloads.inline_mode);
out:
	up_write(&esw->mode_lock);
	return err;
}
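
/*
 * Userspace equivalent (iproute2 devlink):
 *
 *	devlink dev eswitch set pci/0000:03:00.0 inline-mode transport
 *
 * The mode can only be changed while no offloaded flows exist, as
 * enforced by the num_flows check above; a mid-loop failure reverts the
 * vports already modified.
 */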

int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_eswitch *esw;
	int err;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	down_write(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto unlock;

	err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
unlock:
	up_write(&esw->mode_lock);
	return err;
}

int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw;
	int err;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	down_write(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto unlock;

	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	if (esw->mode == MLX5_ESWITCH_LEGACY) {
		esw->offloads.encap = encap;
		goto unlock;
	}

	if (esw->offloads.encap == encap)
		goto unlock;

	if (atomic64_read(&esw->offloads.num_flows) > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set encapsulation when flows are configured");
		err = -EOPNOTSUPP;
		goto unlock;
	}

	esw_destroy_offloads_fdb_tables(esw);

	esw->offloads.encap = encap;

	err = esw_create_offloads_fdb_tables(esw);

	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed re-creating fast FDB table");
		esw->offloads.encap = !encap;
		(void)esw_create_offloads_fdb_tables(esw);
	}

unlock:
	up_write(&esw->mode_lock);
	return err;
}
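
/*
 * Changing encap in offloads mode requires tearing down and re-creating
 * the fast-path FDB, since the table's TUNNEL_EN_REFORMAT/DECAP flags
 * are fixed at creation time. From userspace this is, for example:
 *
 *	devlink dev eswitch set pci/0000:03:00.0 encap-mode basic
 *
 * (older iproute2 releases spell the option "encap enable").
 */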

int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
					enum devlink_eswitch_encap_mode *encap)
{
	struct mlx5_eswitch *esw;
	int err;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	down_write(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto unlock;

	*encap = esw->offloads.encap;
unlock:
	up_write(&esw->mode_lock);
	return 0;
}

static bool
mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
{
	/* Currently, only ECPF based device has representor for host PF. */
	if (vport_num == MLX5_VPORT_PF &&
	    !mlx5_core_is_ecpf_esw_manager(esw->dev))
		return false;

	if (vport_num == MLX5_VPORT_ECPF &&
	    !mlx5_ecpf_vport_exists(esw->dev))
		return false;

	return true;
}

void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
				      const struct mlx5_eswitch_rep_ops *ops,
				      u8 rep_type)
{
	struct mlx5_eswitch_rep_data *rep_data;
	struct mlx5_eswitch_rep *rep;
	int i;

	esw->offloads.rep_ops[rep_type] = ops;
	mlx5_esw_for_all_reps(esw, i, rep) {
		if (likely(mlx5_eswitch_vport_has_rep(esw, i))) {
			rep_data = &rep->rep_data[rep_type];
			atomic_set(&rep_data->state, REP_REGISTERED);
		}
	}
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);

void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int i;

	if (esw->mode == MLX5_ESWITCH_OFFLOADS)
		__unload_reps_all_vport(esw, rep_type);

	mlx5_esw_for_all_reps(esw, i, rep)
		atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);

void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	return rep->rep_data[rep_type].priv;
}

void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
				 u16 vport,
				 u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, vport);

	if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
	    esw->offloads.rep_ops[rep_type]->get_proto_dev)
		return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
	return NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);

void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
{
	return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
}
EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);

struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
						u16 vport)
{
	return mlx5_eswitch_get_rep(esw, vport);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);

bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
	return vport_num >= MLX5_VPORT_FIRST_VF &&
	       vport_num <= esw->dev->priv.sriov.max_vfs;
}

bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
{
	return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED);
}
EXPORT_SYMBOL(mlx5_eswitch_reg_c1_loopback_enabled);

bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
{
	return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);

u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
					      u16 vport_num)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);

	if (WARN_ON_ONCE(IS_ERR(vport)))
		return 0;

	return vport->metadata << (32 - ESW_SOURCE_PORT_METADATA_BITS);
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);

int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
				      u16 vport_num, u32 sfnum)
{
	int err;

	err = mlx5_esw_vport_enable(esw, vport_num, MLX5_VPORT_UC_ADDR_CHANGE);
	if (err)
		return err;

	err = mlx5_esw_devlink_sf_port_register(esw, dl_port, vport_num, sfnum);
	if (err)
		goto devlink_err;

	err = mlx5_esw_offloads_rep_load(esw, vport_num);
	if (err)
		goto rep_err;
	return 0;

rep_err:
	mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
devlink_err:
	mlx5_esw_vport_disable(esw, vport_num);
	return err;
}

void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
{
	mlx5_esw_offloads_rep_unload(esw, vport_num);
	mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
	mlx5_esw_vport_disable(esw, vport_num);
}
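
/*
 * SF vport bring-up is strictly ordered (vport enable -> devlink port
 * register -> rep load) and teardown mirrors it in reverse, matching
 * the error unwinding in the enable path above.
 */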

static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num, u16 *vhca_id)
{
	int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *query_ctx;
	void *hca_caps;
	int err;

	*vhca_id = 0;
	if (mlx5_esw_is_manager_vport(esw, vport_num) ||
	    !MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
		return -EPERM;

	query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
	if (!query_ctx)
		return -ENOMEM;

	err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx);
	if (err)
		goto out_free;

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
	*vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id);

out_free:
	kfree(query_ctx);
	return err;
}

int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num)
{
	u16 *old_entry, *vhca_map_entry, vhca_id;
	int err;

	err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
	if (err) {
		esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%u,err=%d)\n",
			 vport_num, err);
		return err;
	}

	vhca_map_entry = kmalloc(sizeof(*vhca_map_entry), GFP_KERNEL);
	if (!vhca_map_entry)
		return -ENOMEM;

	*vhca_map_entry = vport_num;
	old_entry = xa_store(&esw->offloads.vhca_map, vhca_id, vhca_map_entry, GFP_KERNEL);
	if (xa_is_err(old_entry)) {
		kfree(vhca_map_entry);
		return xa_err(old_entry);
	}
	kfree(old_entry);
	return 0;
}

void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num)
{
	u16 *vhca_map_entry, vhca_id;
	int err;

	err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
	if (err)
		esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%hu,err=%d)\n",
			 vport_num, err);

	vhca_map_entry = xa_erase(&esw->offloads.vhca_map, vhca_id);
	kfree(vhca_map_entry);
}

int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num)
{
	u16 *res = xa_load(&esw->offloads.vhca_map, vhca_id);

	if (!res)
		return -ENOENT;

	*vport_num = *res;
	return 0;
}
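
/*
 * The vhca_map xarray gives O(1) translation from a function's vhca_id
 * (as reported in its HCA caps) back to the e-switch vport that owns
 * it; entries are populated when a vport is set up and cleared on
 * teardown via the two helpers above.
 */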

u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
					    u16 vport_num)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);

	if (WARN_ON_ONCE(IS_ERR(vport)))
		return 0;

	return vport->metadata;
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_set);