/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "eswitch_offloads_chains.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
#include "lib/eq.h"

/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)
#define UPLINK_REP_INDEX 0

/* Per vport tables */

#define MLX5_ESW_VPORT_TABLE_SIZE 128

/* This struct is used as a key to the hash table and we need it to be packed
 * so hash result is consistent
 */
struct mlx5_vport_key {
	u32 chain;
	u16 prio;
	u16 vport;
	u16 vhca_id;
} __packed;

struct mlx5_vport_table {
	struct hlist_node hlist;
	struct mlx5_flow_table *fdb;
	u32 num_rules;
	struct mlx5_vport_key key;
};

#define MLX5_ESW_VPORT_TBL_NUM_GROUPS  4

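/* Annotation: the per vport FDB tables below are cached in
 * esw->fdb_table.offloads.vports.table, a hash table keyed by
 * {chain, prio, vport, vhca_id} and reference counted via num_rules,
 * so repeated esw_vport_tbl_get() calls for the same key share one
 * flow table rather than creating duplicates.
 */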
static struct mlx5_flow_table *
esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *fdb;

	ft_attr.autogroup.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS;
	ft_attr.max_fte = MLX5_ESW_VPORT_TABLE_SIZE;
	ft_attr.prio = FDB_PER_VPORT;
	fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
	if (IS_ERR(fdb)) {
		esw_warn(esw->dev, "Failed to create per vport FDB Table err %ld\n",
			 PTR_ERR(fdb));
	}

	return fdb;
}

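/* Annotation: build the packed lookup key from the flow attributes and
 * return its jhash. The same derivation is used on both the lookup and
 * insertion paths, which is presumably why the key struct is __packed:
 * any padding would make the hash inconsistent.
 */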
static u32 flow_attr_to_vport_key(struct mlx5_eswitch *esw,
				  struct mlx5_esw_flow_attr *attr,
				  struct mlx5_vport_key *key)
{
	key->vport = attr->in_rep->vport;
	key->chain = attr->chain;
	key->prio = attr->prio;
	key->vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
	return jhash(key, sizeof(*key), 0);
}

/* caller must hold vports.lock */
static struct mlx5_vport_table *
esw_vport_tbl_lookup(struct mlx5_eswitch *esw, struct mlx5_vport_key *skey, u32 key)
{
	struct mlx5_vport_table *e;

	hash_for_each_possible(esw->fdb_table.offloads.vports.table, e, hlist, key)
		if (!memcmp(&e->key, skey, sizeof(*skey)))
			return e;

	return NULL;
}

static void
esw_vport_tbl_put(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_vport_table *e;
	struct mlx5_vport_key key;
	u32 hkey;

	mutex_lock(&esw->fdb_table.offloads.vports.lock);
	hkey = flow_attr_to_vport_key(esw, attr, &key);
	e = esw_vport_tbl_lookup(esw, &key, hkey);
	if (!e || --e->num_rules)
		goto out;

	hash_del(&e->hlist);
	mlx5_destroy_flow_table(e->fdb);
	kfree(e);
out:
	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
}

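/* Annotation: get (or lazily create) the per vport table matching @attr.
 * On a cache hit only the num_rules refcount is bumped; on a miss a new
 * flow table is created in the FDB namespace and inserted into the hash
 * table. Callers pair this with esw_vport_tbl_put() to drop the
 * reference and free the table once the last rule is gone.
 */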
static struct mlx5_flow_table *
esw_vport_tbl_get(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *fdb;
	struct mlx5_vport_table *e;
	struct mlx5_vport_key skey;
	u32 hkey;

	mutex_lock(&esw->fdb_table.offloads.vports.lock);
	hkey = flow_attr_to_vport_key(esw, attr, &skey);
	e = esw_vport_tbl_lookup(esw, &skey, hkey);
	if (e) {
		e->num_rules++;
		goto out;
	}

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e) {
		fdb = ERR_PTR(-ENOMEM);
		goto err_alloc;
	}

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!ns) {
		esw_warn(dev, "Failed to get FDB namespace\n");
		fdb = ERR_PTR(-ENOENT);
		goto err_ns;
	}

	fdb = esw_vport_tbl_create(esw, ns);
	if (IS_ERR(fdb))
		goto err_ns;

	e->fdb = fdb;
	e->num_rules = 1;
	e->key = skey;
	hash_add(esw->fdb_table.offloads.vports.table, &e->hlist, hkey);
out:
	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
	return e->fdb;

err_ns:
	kfree(e);
err_alloc:
	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
	return fdb;
}

int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_flow_attr attr = {};
	struct mlx5_eswitch_rep rep = {};
	struct mlx5_flow_table *fdb;
	struct mlx5_vport *vport;
	int i;

	attr.prio = 1;
	attr.in_rep = &rep;
	mlx5_esw_for_all_vports(esw, i, vport) {
		attr.in_rep->vport = vport->vport;
		fdb = esw_vport_tbl_get(esw, &attr);
		if (IS_ERR(fdb))
			goto out;
	}
	return 0;

out:
	mlx5_esw_vport_tbl_put(esw);
	return PTR_ERR(fdb);
}

void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_flow_attr attr = {};
	struct mlx5_eswitch_rep rep = {};
	struct mlx5_vport *vport;
	int i;

	attr.prio = 1;
	attr.in_rep = &rep;
	mlx5_esw_for_all_vports(esw, i, vport) {
		attr.in_rep->vport = vport->vport;
		esw_vport_tbl_put(esw, &attr);
	}
}

/* End: Per vport tables */

static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
						     u16 vport_num)
{
	int idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);

	WARN_ON(idx > esw->total_vports - 1);
	return &esw->offloads.vport_reps[idx];
}

static bool
esw_check_ingress_prio_tag_enabled(const struct mlx5_eswitch *esw,
				   const struct mlx5_vport *vport)
{
	return (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
		mlx5_eswitch_is_vf_vport(esw, vport->vport));
}

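/* Annotation: set the source vport match on @spec, either as metadata
 * carried in reg_c_0 (when vport match metadata is enabled) or as the
 * legacy source_port / source_eswitch_owner_vhca_id misc fields.
 */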
static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_esw_flow_attr *attr)
{
	void *misc2;
	void *misc;

	/* Use metadata matching because vport is not represented by single
	 * VHCA in dual-port RoCE mode, and matching on source vport may fail.
	 */
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(attr->in_mdev->priv.eswitch,
								   attr->in_rep->vport));

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		if (memchr_inv(misc, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc)))
			spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id,
				 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET_TO_ONES(fte_match_set_misc, misc,
					 source_eswitch_owner_vhca_id);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	}

	if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
	    attr->in_rep->vport == MLX5_VPORT_UPLINK)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
}

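/* Annotation: translate @attr into a flow rule in the FDB. The
 * destination array is built from the attributes (goto-chain table,
 * slow path end table, explicit dest table, or vport destinations with
 * optional packet reformat), the target table is the per vport table
 * for split rules or the chain table otherwise, and the rule may be
 * routed through a termination table when one is required.
 */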
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	bool split = !!(attr->split_count);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *fdb;
	bool hairpin = false;
	int j, i = 0;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	flow_act.action = attr->action;
	/* if per flow vlan pop/push is emulated, don't set that into the firmware */
	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
				     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
		flow_act.vlan[0].vid = attr->vlan_vid[0];
		flow_act.vlan[0].prio = attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
			flow_act.vlan[1].vid = attr->vlan_vid[1];
			flow_act.vlan[1].prio = attr->vlan_prio[1];
		}
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		struct mlx5_flow_table *ft;

		if (attr->dest_ft) {
			flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[i].ft = attr->dest_ft;
			i++;
		} else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
			flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[i].ft = mlx5_esw_chains_get_tc_end_ft(esw);
			i++;
		} else if (attr->dest_chain) {
			flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
			ft = mlx5_esw_chains_get_table(esw, attr->dest_chain,
						       1, 0);
			if (IS_ERR(ft)) {
				rule = ERR_CAST(ft);
				goto err_create_goto_table;
			}

			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[i].ft = ft;
			i++;
		} else {
			for (j = attr->split_count; j < attr->out_count; j++) {
				dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
				dest[i].vport.num = attr->dests[j].rep->vport;
				dest[i].vport.vhca_id =
					MLX5_CAP_GEN(attr->dests[j].mdev, vhca_id);
				if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
					dest[i].vport.flags |=
						MLX5_FLOW_DEST_VPORT_VHCA_ID;
				if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
					flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
					flow_act.pkt_reformat = attr->dests[j].pkt_reformat;
					dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
					dest[i].vport.pkt_reformat =
						attr->dests[j].pkt_reformat;
				}
				i++;
			}
		}
	}
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter_id = mlx5_fc_id(attr->counter);
		i++;
	}

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	if (attr->inner_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

	if (split) {
		fdb = esw_vport_tbl_get(esw, attr);
	} else {
		if (attr->chain || attr->prio)
			fdb = mlx5_esw_chains_get_table(esw, attr->chain,
							attr->prio, 0);
		else
			fdb = attr->fdb;

		if (!(attr->flags & MLX5_ESW_ATTR_FLAG_NO_IN_PORT))
			mlx5_eswitch_set_rule_source_port(esw, spec, attr);
	}
	if (IS_ERR(fdb)) {
		rule = ERR_CAST(fdb);
		goto err_esw_get;
	}

	if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec)) {
		rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr,
						     &flow_act, dest, i);
		hairpin = true;
	} else {
		rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
	}
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		atomic64_inc(&esw->offloads.num_flows);

	if (hairpin)
		attr->flags |= MLX5_ESW_ATTR_FLAG_HAIRPIN;

	return rule;

err_add_rule:
	if (split)
		esw_vport_tbl_put(esw, attr);
	else if (attr->chain || attr->prio)
		mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
err_esw_get:
	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) && attr->dest_chain)
		mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
err_create_goto_table:
	return rule;
}

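/* Annotation: add the first half of a split rule: match in the chain
 * ("fast") table, replicate to the pre-encap vport destinations in
 * attr->dests[0..split_count-1], and forward to the per vport table
 * where the rest of the rule's actions are applied.
 */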
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *fast_fdb;
	struct mlx5_flow_table *fwd_fdb;
	struct mlx5_flow_handle *rule;
	int i;

	fast_fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio, 0);
	if (IS_ERR(fast_fdb)) {
		rule = ERR_CAST(fast_fdb);
		goto err_get_fast;
	}

	fwd_fdb = esw_vport_tbl_get(esw, attr);
	if (IS_ERR(fwd_fdb)) {
		rule = ERR_CAST(fwd_fdb);
		goto err_get_fwd;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < attr->split_count; i++) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport.num = attr->dests[i].rep->vport;
		dest[i].vport.vhca_id =
			MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
		if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
			dest[i].vport.pkt_reformat = attr->dests[i].pkt_reformat;
		}
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = fwd_fdb;
	i++;

	mlx5_eswitch_set_rule_source_port(esw, spec, attr);

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);

	if (IS_ERR(rule))
		goto add_err;

	atomic64_inc(&esw->offloads.num_flows);

	return rule;
add_err:
	esw_vport_tbl_put(esw, attr);
err_get_fwd:
	mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
err_get_fast:
	return rule;
}

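/* Annotation: common teardown for offloaded and fwd rules: delete the
 * rule, release any termination tables it referenced (hairpin flag),
 * and drop the references taken on the per vport and chain tables by
 * the corresponding add path.
 */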
static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
			struct mlx5_flow_handle *rule,
			struct mlx5_esw_flow_attr *attr,
			bool fwd_rule)
{
	bool split = (attr->split_count > 0);
	int i;

	mlx5_del_flow_rules(rule);

	if (attr->flags & MLX5_ESW_ATTR_FLAG_HAIRPIN) {
		/* unref the term table */
		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
			if (attr->dests[i].termtbl)
				mlx5_eswitch_termtbl_put(esw, attr->dests[i].termtbl);
		}
	}

	atomic64_dec(&esw->offloads.num_flows);

	if (fwd_rule) {
		esw_vport_tbl_put(esw, attr);
		mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
	} else {
		if (split)
			esw_vport_tbl_put(esw, attr);
		else if (attr->chain || attr->prio)
			mlx5_esw_chains_put_table(esw, attr->chain, attr->prio,
						  0);
		if (attr->dest_chain)
			mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
	}
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, false);
}

void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, true);
}

static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int i, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	mlx5_esw_for_each_host_func_rep(esw, i, rep, esw->esw_funcs.num_vfs) {
		if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push && in_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules without it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules without vlans (attr->vlan = 0) && with vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}

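/* Annotation: emulated vlan offload. The first pop rule applies a
 * global vlan strip policy on the host function vports, and push rules
 * program vlan insertion on the in_rep's vport; vlan_refcount and
 * vlan_push_pop_refcount track how many offloaded rules depend on each
 * setting so it can be undone in mlx5_eswitch_del_vlan_action().
 */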
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nothing to do if the HW supports vlan push/pop (non-emulation mode) */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
		 !attr->dest_chain);

	mutex_lock(&esw->state_lock);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		goto unlock;

	attr->flags &= ~MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
			vport->vlan_refcount++;
			attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
		}

		goto unlock;
	}

	if (!push && !pop)
		goto unlock;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan_vid[0];
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nothing to do if the HW supports vlan push/pop (non-emulation mode) */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_VLAN_HANDLED))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	mutex_lock(&esw->state_lock);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
			vport->vlan_refcount--;

		goto out;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		goto out;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	mutex_unlock(&esw->state_lock);
	return err;
}

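/* Annotation: install a slow path rule matching packets transmitted by
 * the esw manager on a given send queue (source_sqn) and forward them
 * to @vport; this is how traffic a representor transmits reaches the
 * function it represents.
 */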
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, u16 vport,
				    u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	/* source vport is the esw manager */
	MLX5_SET(fte_match_set_misc, misc, source_port, esw->manager_vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
{
	return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	       MLX5_FDB_TO_VPORT_REG_C_1;
}

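/* Annotation: enable/disable copying FDB metadata registers to the
 * vport context: reg_c_0 always, and reg_c_1 additionally when the
 * device can loop it back; the MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED
 * flag then records whether reg_c_1 loopback is actually in effect.
 */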
static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
	u8 curr, wanted;
	int err;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw) &&
	    !mlx5_eswitch_vport_match_metadata_enabled(esw))
		return 0;

	err = mlx5_eswitch_query_esw_vport_context(esw->dev, 0, false,
						   out, sizeof(out));
	if (err)
		return err;

	curr = MLX5_GET(query_esw_vport_context_out, out,
			esw_vport_context.fdb_to_vport_reg_c_id);
	wanted = MLX5_FDB_TO_VPORT_REG_C_0;
	if (mlx5_eswitch_reg_c1_loopback_supported(esw))
		wanted |= MLX5_FDB_TO_VPORT_REG_C_1;

	if (enable)
		curr |= wanted;
	else
		curr &= ~wanted;

	MLX5_SET(modify_esw_vport_context_in, in,
		 esw_vport_context.fdb_to_vport_reg_c_id, curr);

	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.fdb_to_vport_reg_c_id, 1);

	err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, in,
						    sizeof(in));
	if (!err) {
		if (enable && (curr & MLX5_FDB_TO_VPORT_REG_C_1))
			esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
		else
			esw->flags &= ~MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
	}

	return err;
}

static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
				  struct mlx5_core_dev *peer_dev,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_destination *dest)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

		MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(peer_dev, vhca_id));

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);
	}

	dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest->vport.num = peer_dev->priv.eswitch->manager_vport;
	dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
	dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}

static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
					       struct mlx5_eswitch *peer_esw,
					       struct mlx5_flow_spec *spec,
					       u16 vport)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(peer_esw,
								   vport));
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);
	}
}

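/* Annotation: when two eswitches are paired (see the devcom handling
 * below), each side adds one miss rule per peer vport (PF, ECPF when
 * present, and each VF) so traffic sourced from the peer eswitch is
 * forwarded back to the peer's manager vport.
 */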
static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
				       struct mlx5_core_dev *peer_dev)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle **flows;
	struct mlx5_flow_handle *flow;
	struct mlx5_flow_spec *spec;
	/* total vports is the same for both e-switches */
	int nvports = esw->total_vports;
	void *misc;
	int err, i;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	peer_miss_rules_setup(esw, peer_dev, spec, &dest);

	flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
	if (!flows) {
		err = -ENOMEM;
		goto alloc_flows_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
						   spec, MLX5_VPORT_PF);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_pf_flow_err;
		}
		flows[MLX5_VPORT_PF] = flow;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_ecpf_flow_err;
		}
		flows[mlx5_eswitch_ecpf_idx(esw)] = flow;
	}

	mlx5_esw_for_each_vf_vport_num(esw, i, mlx5_core_max_vfs(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw,
						   peer_dev->priv.eswitch,
						   spec, i);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_vf_flow_err;
		}
		flows[i] = flow;
	}

	esw->fdb_table.offloads.peer_miss_rules = flows;

	kvfree(spec);
	return 0;

add_vf_flow_err:
	nvports = --i;
	mlx5_esw_for_each_vf_vport_num_reverse(esw, i, nvports)
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
add_ecpf_flow_err:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);
add_pf_flow_err:
	esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
	kvfree(flows);
alloc_flows_err:
	kvfree(spec);
	return err;
}

static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows;
	int i;

	flows = esw->fdb_table.offloads.peer_miss_rules;

	mlx5_esw_for_each_vf_vport_num_reverse(esw, i,
					       mlx5_core_max_vfs(esw->dev))
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);

	kvfree(flows);
}

static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = esw->manager_vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}

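/* Annotation: add a restore rule for one chain tag: match the tag in
 * reg_c_0, copy reg_c_1 into reg_b via the pre-allocated mod header
 * (see esw_create_restore_table()), set the flow tag and forward to
 * the offloads table, so the packet continues toward the representor
 * with its chain context restored.
 */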
struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *ft = esw->offloads.ft_offloads_restore;
	struct mlx5_flow_context *flow_context;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_destination dest;
	struct mlx5_flow_spec *spec;
	void *misc;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return ERR_PTR(-EOPNOTSUPP);

	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
		 ESW_CHAIN_TAG_METADATA_MASK);
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag);
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	flow_act.modify_hdr = esw->offloads.restore_copy_hdr_id;

	flow_context = &spec->flow_context;
	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = tag;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = esw->offloads.ft_offloads;

	flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	kfree(spec);

	if (IS_ERR(flow_rule))
		esw_warn(esw->dev,
			 "Failed to create restore rule for tag: %d, err(%d)\n",
			 tag, (int)PTR_ERR(flow_rule));

	return flow_rule;
}

u32
esw_get_max_restore_tag(struct mlx5_eswitch *esw)
{
	return ESW_CHAIN_TAG_METADATA_MASK;
}

#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
					   u32 *flow_group_in)
{
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    flow_group_in,
					    match_criteria);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS_2);

		MLX5_SET(fte_match_param, match_criteria,
			 misc_parameters_2.metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());
	} else {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_port);
	}
}

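/* Annotation: the slow path FDB created below is laid out as three
 * flow groups: a send-to-vport group (matching source sqn and port),
 * a peer eswitch miss group for paired devices, and a miss group keyed
 * on the multicast bit of the destination mac, which holds the two
 * MLX5_ESW_MISS_FLOWS rules installed by esw_add_fdb_miss_rule().
 */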
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	u32 flags = 0, *flow_group_in;
	int table_size, ix, err = 0;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u8 *dmac;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}
	esw->fdb_table.offloads.ns = root_ns;
	err = mlx5_flow_namespace_set_mode(root_ns,
					   esw->dev->priv.steering->mode);
	if (err) {
		esw_warn(dev, "Failed to set FDB namespace steering mode\n");
		goto ns_err;
	}

	table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ +
		MLX5_ESW_MISS_FLOWS + esw->total_vports;

	/* create the slow path fdb with encap set, so further table instances
	 * can be created at run time while VFs are probed if the FW allows that.
	 */
	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	ft_attr.flags = flags;
	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

	err = mlx5_esw_chains_create(esw);
	if (err) {
		esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
		goto fdb_chains_err;
	}

	/* create send-to-vport group */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create peer esw miss group */
	memset(flow_group_in, 0, inlen);

	esw_set_flow_group_source_port(esw, flow_group_in);

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					      flow_group_in,
					      match_criteria);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_eswitch_owner_vhca_id);

		MLX5_SET(create_flow_group_in, flow_group_in,
			 source_eswitch_owner_vhca_id_valid, 1);
	}

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + esw->total_vports - 1);
	ix += esw->total_vports;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
		goto peer_miss_err;
	}
	esw->fdb_table.offloads.peer_miss_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + MLX5_ESW_MISS_FLOWS);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	esw->nvports = nvports;
	kvfree(flow_group_in);
	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	mlx5_esw_chains_destroy(esw);
fdb_chains_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS);
ns_err:
	kvfree(flow_group_in);
	return err;
}

1967ce6e 1302static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
69697b6e 1303{
e52c2802 1304 if (!esw->fdb_table.offloads.slow_fdb)
69697b6e
OG
1305 return;
1306
1967ce6e 1307 esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
f80be543
MB
1308 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
1309 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
69697b6e 1310 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
ac004b83 1311 mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
69697b6e
OG
1312 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
1313
39ac237c 1314 mlx5_esw_chains_destroy(esw);
52fff327 1315 mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
8463daf1
MG
1316 /* Holds true only as long as DMFS is the default */
1317 mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
1318 MLX5_FLOW_STEERING_MODE_DMFS);
69697b6e 1319}
c116c6ee 1320
static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = nvports + MLX5_ESW_MISS_FLOWS;
	ft_attr.prio = 1;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}

static int esw_create_vport_rx_group(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int err = 0;

	nvports = nvports + MLX5_ESW_MISS_FLOWS;
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	esw_set_flow_group_source_port(esw, flow_group_in);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(esw, vport));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}

static int mlx5_eswitch_inline_mode_get(const struct mlx5_eswitch *esw, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	int vport;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == MLX5_ESWITCH_NONE)
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}

query_vports:
	mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
	mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
		if (prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}

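/* Annotation: the restore table lives in the offloads namespace and
 * holds one rule per chain tag (see esw_add_restore_rule()); its single
 * group matches the tag in reg_c_0 and its mod header copies reg_c_1 to
 * reg_b. It is only created when reg_c_1 loopback is supported.
 */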
static void esw_destroy_restore_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return;

	mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id);
	mlx5_destroy_flow_group(offloads->restore_group);
	mlx5_destroy_flow_table(offloads->ft_offloads_restore);
}

1490
1491static int esw_create_restore_table(struct mlx5_eswitch *esw)
1492{
6724e66b 1493 u8 modact[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {};
11b717d6
PB
1494 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1495 struct mlx5_flow_table_attr ft_attr = {};
1496 struct mlx5_core_dev *dev = esw->dev;
1497 struct mlx5_flow_namespace *ns;
6724e66b 1498 struct mlx5_modify_hdr *mod_hdr;
11b717d6
PB
1499 void *match_criteria, *misc;
1500 struct mlx5_flow_table *ft;
1501 struct mlx5_flow_group *g;
1502 u32 *flow_group_in;
1503 int err = 0;
1504
60acc105
PB
1505 if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
1506 return 0;
1507
11b717d6
PB
1508 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
1509 if (!ns) {
1510 esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
1511 return -EOPNOTSUPP;
1512 }
1513
1514 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
1515 if (!flow_group_in) {
1516 err = -ENOMEM;
1517 goto out_free;
1518 }
1519
1520 ft_attr.max_fte = 1 << ESW_CHAIN_TAG_METADATA_BITS;
1521 ft = mlx5_create_flow_table(ns, &ft_attr);
1522 if (IS_ERR(ft)) {
1523 err = PTR_ERR(ft);
1524 esw_warn(esw->dev, "Failed to create restore table, err %d\n",
1525 err);
1526 goto out_free;
1527 }
1528
1529 memset(flow_group_in, 0, inlen);
1530 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
1531 match_criteria);
1532 misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
1533 misc_parameters_2);
1534
1535 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
1536 ESW_CHAIN_TAG_METADATA_MASK);
1537 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
1538 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
1539 ft_attr.max_fte - 1);
1540 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
1541 MLX5_MATCH_MISC_PARAMETERS_2);
1542 g = mlx5_create_flow_group(ft, flow_group_in);
1543 if (IS_ERR(g)) {
1544 err = PTR_ERR(g);
1545 esw_warn(dev, "Failed to create restore flow group, err: %d\n",
1546 err);
1547 goto err_group;
1548 }
1549
6724e66b
PB
1550 MLX5_SET(copy_action_in, modact, action_type, MLX5_ACTION_TYPE_COPY);
1551 MLX5_SET(copy_action_in, modact, src_field,
1552 MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
1553 MLX5_SET(copy_action_in, modact, dst_field,
1554 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
1555 mod_hdr = mlx5_modify_header_alloc(esw->dev,
1556 MLX5_FLOW_NAMESPACE_KERNEL, 1,
1557 modact);
1558 if (IS_ERR(mod_hdr)) {
1559 esw_warn(dev, "Failed to create restore mod header, err: %d\n",
1560 err);
1561 err = PTR_ERR(mod_hdr);
1562 goto err_mod_hdr;
1563 }
1564
11b717d6
PB
1565 esw->offloads.ft_offloads_restore = ft;
1566 esw->offloads.restore_group = g;
6724e66b 1567 esw->offloads.restore_copy_hdr_id = mod_hdr;
11b717d6
PB
1568
1569 return 0;
1570
6724e66b
PB
1571err_mod_hdr:
1572 mlx5_destroy_flow_group(g);
11b717d6
PB
1573err_group:
1574 mlx5_destroy_flow_table(ft);
1575out_free:
1576 kvfree(flow_group_in);
1577
1578 return err;
cc617ced
PP
1579}
1580
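/* Annotation: switch the eswitch from legacy to offloads mode: disable,
 * refresh the VF count, and re-enable in offloads mode, falling back to
 * legacy mode if that fails. The inline mode is (re)derived from the
 * vports when it was left unset.
 */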
static int esw_offloads_start(struct mlx5_eswitch *esw,
			      struct netlink_ext_ack *extack)
{
	int err, err1;

	if (esw->mode != MLX5_ESWITCH_LEGACY &&
	    !mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set offloads mode, SRIOV legacy not enabled");
		return -EINVAL;
	}

	mlx5_eswitch_disable(esw, false);
	mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs);
	err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed setting eswitch to offloads");
		err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to legacy");
		}
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			NL_SET_ERR_MSG_MOD(extack,
					   "Inline mode is different between vports");
		}
	}
	return err;
}

void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	kfree(esw->offloads.vport_reps);
}

int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	int total_vports = esw->total_vports;
	struct mlx5_eswitch_rep *rep;
	int vport_index;
	u8 rep_type;

	esw->offloads.vport_reps = kcalloc(total_vports,
					   sizeof(struct mlx5_eswitch_rep),
					   GFP_KERNEL);
	if (!esw->offloads.vport_reps)
		return -ENOMEM;

	mlx5_esw_for_all_reps(esw, vport_index, rep) {
		rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport_index);
		rep->vport_index = vport_index;

		for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
			atomic_set(&rep->rep_data[rep_type].state,
				   REP_UNREGISTERED);
	}

	return 0;
}

static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
				      struct mlx5_eswitch_rep *rep, u8 rep_type)
{
	if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
			   REP_LOADED, REP_REGISTERED) == REP_LOADED)
		esw->offloads.rep_ops[rep_type]->unload(rep);
}

static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int i;

	mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, esw->esw_funcs.num_vfs)
		__esw_offloads_unload_rep(esw, rep, rep_type);

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	__esw_offloads_unload_rep(esw, rep, rep_type);
}

c2d7712c 1676int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
a4b97ab4 1677{
c2d7712c
BW
1678 struct mlx5_eswitch_rep *rep;
1679 int rep_type;
a4b97ab4
MB
1680 int err;
1681
c2d7712c
BW
1682 if (esw->mode != MLX5_ESWITCH_OFFLOADS)
1683 return 0;
a4b97ab4 1684
c2d7712c
BW
1685 rep = mlx5_eswitch_get_rep(esw, vport_num);
1686 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
1687 if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
1688 REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
1689 err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
1690 if (err)
1691 goto err_reps;
1692 }
1693
1694 return 0;
a4b97ab4
MB
1695
1696err_reps:
c2d7712c
BW
1697 atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED);
1698 for (--rep_type; rep_type >= 0; rep_type--)
1699 __esw_offloads_unload_rep(esw, rep, rep_type);
6ed1803a
MB
1700 return err;
1701}
1702
c2d7712c
BW
1703void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
1704{
1705 struct mlx5_eswitch_rep *rep;
1706 int rep_type;
1707
1708 if (esw->mode != MLX5_ESWITCH_OFFLOADS)
1709 return;
1710
1711 rep = mlx5_eswitch_get_rep(esw, vport_num);
1712 for (rep_type = NUM_REP_TYPES - 1; rep_type >= 0; rep_type--)
1713 __esw_offloads_unload_rep(esw, rep, rep_type);
1714}
1715
ac004b83
RD
1716#define ESW_OFFLOADS_DEVCOM_PAIR (0)
1717#define ESW_OFFLOADS_DEVCOM_UNPAIR (1)
1718
1719static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
1720 struct mlx5_eswitch *peer_esw)
1721{
1722 int err;
1723
1724 err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
1725 if (err)
1726 return err;
1727
1728 return 0;
1729}
1730
1731static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
1732{
04de7dda 1733 mlx5e_tc_clean_fdb_peer_flows(esw);
ac004b83
RD
1734 esw_del_fdb_peer_miss_rules(esw);
1735}
1736
8463daf1
MG
1737static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
1738 struct mlx5_eswitch *peer_esw,
1739 bool pair)
1740{
1741 struct mlx5_flow_root_namespace *peer_ns;
1742 struct mlx5_flow_root_namespace *ns;
1743 int err;
1744
1745 peer_ns = peer_esw->dev->priv.steering->fdb_root_ns;
1746 ns = esw->dev->priv.steering->fdb_root_ns;
1747
1748 if (pair) {
1749 err = mlx5_flow_namespace_set_peer(ns, peer_ns);
1750 if (err)
1751 return err;
1752
e53e6655 1753 err = mlx5_flow_namespace_set_peer(peer_ns, ns);
8463daf1
MG
1754 if (err) {
1755 mlx5_flow_namespace_set_peer(ns, NULL);
1756 return err;
1757 }
1758 } else {
1759 mlx5_flow_namespace_set_peer(ns, NULL);
1760 mlx5_flow_namespace_set_peer(peer_ns, NULL);
1761 }
1762
1763 return 0;
1764}
1765
ac004b83
RD
1766static int mlx5_esw_offloads_devcom_event(int event,
1767 void *my_data,
1768 void *event_data)
1769{
1770 struct mlx5_eswitch *esw = my_data;
ac004b83 1771 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
8463daf1 1772 struct mlx5_eswitch *peer_esw = event_data;
ac004b83
RD
1773 int err;
1774
1775 switch (event) {
1776 case ESW_OFFLOADS_DEVCOM_PAIR:
a5641cb5
JL
1777 if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
1778 mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
1779 break;
1780
8463daf1 1781 err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
ac004b83
RD
1782 if (err)
1783 goto err_out;
8463daf1
MG
1784 err = mlx5_esw_offloads_pair(esw, peer_esw);
1785 if (err)
1786 goto err_peer;
ac004b83
RD
1787
1788 err = mlx5_esw_offloads_pair(peer_esw, esw);
1789 if (err)
1790 goto err_pair;
1791
1792 mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
1793 break;
1794
1795 case ESW_OFFLOADS_DEVCOM_UNPAIR:
1796 if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
1797 break;
1798
1799 mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
1800 mlx5_esw_offloads_unpair(peer_esw);
1801 mlx5_esw_offloads_unpair(esw);
8463daf1 1802 mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
ac004b83
RD
1803 break;
1804 }
1805
1806 return 0;
1807
1808err_pair:
1809 mlx5_esw_offloads_unpair(esw);
8463daf1
MG
1810err_peer:
1811 mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
ac004b83
RD
1812err_out:
1813 mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
1814 event, err);
1815 return err;
1816}
1817
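/* Pairing sketch: when both PFs of a merged_eswitch capable device
 * reach switchdev mode, devcom delivers ESW_OFFLOADS_DEVCOM_PAIR to
 * each side. The handler above then
 * 1) cross-sets the two FDB root namespaces as peers,
 * 2) installs peer miss rules in both directions, and
 * 3) marks the pair with mlx5_devcom_set_paired().
 * ESW_OFFLOADS_DEVCOM_UNPAIR tears this down in reverse order.
 */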
1818static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
1819{
1820 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
1821
04de7dda
RD
1822 INIT_LIST_HEAD(&esw->offloads.peer_flows);
1823 mutex_init(&esw->offloads.peer_mutex);
1824
ac004b83
RD
1825 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
1826 return;
1827
1828 mlx5_devcom_register_component(devcom,
1829 MLX5_DEVCOM_ESW_OFFLOADS,
1830 mlx5_esw_offloads_devcom_event,
1831 esw);
1832
1833 mlx5_devcom_send_event(devcom,
1834 MLX5_DEVCOM_ESW_OFFLOADS,
1835 ESW_OFFLOADS_DEVCOM_PAIR, esw);
1836}
1837
1838static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
1839{
1840 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
1841
1842 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
1843 return;
1844
1845 mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
1846 ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
1847
1848 mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
1849}
1850
18486737
EB
1851static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
1852 struct mlx5_vport *vport)
1853{
18486737
EB
1854 struct mlx5_flow_act flow_act = {0};
1855 struct mlx5_flow_spec *spec;
1856 int err = 0;
1857
1858 /* For prio tag mode, there is only 1 FTE:
7445cfb1
JL
1859 * 1) Untagged packets - push prio tag VLAN and modify metadata if
1860 * required, allow
18486737
EB
1861 * Unmatched traffic is allowed by default
1862 */
18486737 1863 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
b7826076
PP
1864 if (!spec)
1865 return -ENOMEM;
18486737
EB
1866
1867 /* Untagged packets - push prio tag VLAN, allow */
1868 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
1869 MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 0);
1870 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1871 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
1872 MLX5_FLOW_CONTEXT_ACTION_ALLOW;
1873 flow_act.vlan[0].ethtype = ETH_P_8021Q;
1874 flow_act.vlan[0].vid = 0;
1875 flow_act.vlan[0].prio = 0;
7445cfb1 1876
d68316b5 1877 if (vport->ingress.offloads.modify_metadata_rule) {
7445cfb1 1878 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
d68316b5 1879 flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
7445cfb1
JL
1880 }
1881
18486737
EB
1882 vport->ingress.allow_rule =
1883 mlx5_add_flow_rules(vport->ingress.acl, spec,
1884 &flow_act, NULL, 0);
1885 if (IS_ERR(vport->ingress.allow_rule)) {
1886 err = PTR_ERR(vport->ingress.allow_rule);
1887 esw_warn(esw->dev,
1888 "vport[%d] configure ingress untagged allow rule, err(%d)\n",
1889 vport->vport, err);
1890 vport->ingress.allow_rule = NULL;
18486737
EB
1891 }
1892
18486737 1893 kvfree(spec);
18486737
EB
1894 return err;
1895}
1896
7445cfb1
JL
1897static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
1898 struct mlx5_vport *vport)
1899{
1900 u8 action[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {};
1901 struct mlx5_flow_act flow_act = {};
7445cfb1 1902 int err = 0;
0f0d3827
PB
1903 u32 key;
1904
1905 key = mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport);
1906 key >>= ESW_SOURCE_PORT_METADATA_OFFSET;
7445cfb1
JL
1907
1908 MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
0f0d3827
PB
1909 MLX5_SET(set_action_in, action, field,
1910 MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
1911 MLX5_SET(set_action_in, action, data, key);
1912 MLX5_SET(set_action_in, action, offset,
1913 ESW_SOURCE_PORT_METADATA_OFFSET);
1914 MLX5_SET(set_action_in, action, length,
1915 ESW_SOURCE_PORT_METADATA_BITS);
7445cfb1 1916
d68316b5 1917 vport->ingress.offloads.modify_metadata =
2b688ea5
MG
1918 mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
1919 1, action);
d68316b5
PP
1920 if (IS_ERR(vport->ingress.offloads.modify_metadata)) {
1921 err = PTR_ERR(vport->ingress.offloads.modify_metadata);
7445cfb1
JL
1922 esw_warn(esw->dev,
1923 "failed to alloc modify header for vport %d ingress acl (%d)\n",
1924 vport->vport, err);
1925 return err;
1926 }
1927
1928 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
d68316b5
PP
1929 flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
1930 vport->ingress.offloads.modify_metadata_rule =
1931 mlx5_add_flow_rules(vport->ingress.acl,
5c2aa8ae 1932 NULL, &flow_act, NULL, 0);
d68316b5
PP
1933 if (IS_ERR(vport->ingress.offloads.modify_metadata_rule)) {
1934 err = PTR_ERR(vport->ingress.offloads.modify_metadata_rule);
7445cfb1
JL
1935 esw_warn(esw->dev,
1936 "failed to add setting metadata rule for vport %d ingress acl, err(%d)\n",
1937 vport->vport, err);
b7826076 1938 mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
d68316b5 1939 vport->ingress.offloads.modify_metadata_rule = NULL;
7445cfb1 1940 }
7445cfb1
JL
1941 return err;
1942}
1943
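/* Net effect of the rule added above (sketch): every packet entering
 * from this vport has the high ESW_SOURCE_PORT_METADATA_BITS bits of
 * REG_C_0 set to (vhca_id << ESW_VPORT_BITS) | vport_num, the same
 * value mlx5_eswitch_get_vport_metadata_for_match() returns, so FDB
 * rules can match the source port on metadata instead of vport number.
 */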
a962d7a6
PP
1944static void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
1945 struct mlx5_vport *vport)
7445cfb1 1946{
d68316b5
PP
1947 if (vport->ingress.offloads.modify_metadata_rule) {
1948 mlx5_del_flow_rules(vport->ingress.offloads.modify_metadata_rule);
1949 mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
7445cfb1 1950
d68316b5 1951 vport->ingress.offloads.modify_metadata_rule = NULL;
7445cfb1
JL
1952 }
1953}
1954
10652f39
PP
1955static int esw_vport_create_ingress_acl_group(struct mlx5_eswitch *esw,
1956 struct mlx5_vport *vport)
18486737 1957{
10652f39
PP
1958 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1959 struct mlx5_flow_group *g;
b7826076 1960 void *match_criteria;
10652f39 1961 u32 *flow_group_in;
b7826076 1962 u32 flow_index = 0;
10652f39 1963 int ret = 0;
18486737 1964
10652f39
PP
1965 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
1966 if (!flow_group_in)
1967 return -ENOMEM;
18486737 1968
b7826076
PP
1969 if (esw_check_ingress_prio_tag_enabled(esw, vport)) {
1970 /* This group holds the FTE that matches untagged packets when
1971 * prio_tag is enabled.
1972 */
1973 memset(flow_group_in, 0, inlen);
18486737 1974
b7826076
PP
1975 match_criteria = MLX5_ADDR_OF(create_flow_group_in,
1976 flow_group_in, match_criteria);
1977 MLX5_SET(create_flow_group_in, flow_group_in,
1978 match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1979 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
1980 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
1981 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
1982
1983 g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
1984 if (IS_ERR(g)) {
1985 ret = PTR_ERR(g);
1986 esw_warn(esw->dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
1987 vport->vport, ret);
1988 goto prio_tag_err;
1989 }
1990 vport->ingress.offloads.metadata_prio_tag_grp = g;
1991 flow_index++;
1992 }
1993
1994 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
1995 /* This group holds a match-all FTE that adds metadata: for
1996 * tagged packets when prio-tag is enabled (as a fallthrough),
1997 * or for all traffic when prio-tag is disabled.
1998 */
1999 memset(flow_group_in, 0, inlen);
2000 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
2001 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
2002
2003 g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
2004 if (IS_ERR(g)) {
2005 ret = PTR_ERR(g);
2006 esw_warn(esw->dev, "vport[%d] ingress create drop flow group, err(%d)\n",
2007 vport->vport, ret);
2008 goto metadata_err;
2009 }
2010 vport->ingress.offloads.metadata_allmatch_grp = g;
2011 }
2012
2013 kvfree(flow_group_in);
2014 return 0;
2015
2016metadata_err:
2017 if (!IS_ERR_OR_NULL(vport->ingress.offloads.metadata_prio_tag_grp)) {
2018 mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
2019 vport->ingress.offloads.metadata_prio_tag_grp = NULL;
18486737 2020 }
b7826076 2021prio_tag_err:
10652f39
PP
2022 kvfree(flow_group_in);
2023 return ret;
2024}
18486737 2025
10652f39
PP
2026static void esw_vport_destroy_ingress_acl_group(struct mlx5_vport *vport)
2027{
b7826076
PP
2028 if (vport->ingress.offloads.metadata_allmatch_grp) {
2029 mlx5_destroy_flow_group(vport->ingress.offloads.metadata_allmatch_grp);
2030 vport->ingress.offloads.metadata_allmatch_grp = NULL;
2031 }
2032
2033 if (vport->ingress.offloads.metadata_prio_tag_grp) {
2034 mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
2035 vport->ingress.offloads.metadata_prio_tag_grp = NULL;
10652f39 2036 }
18486737
EB
2037}
2038
b1a3380a
VP
2039static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
2040 struct mlx5_vport *vport)
18486737 2041{
b7826076 2042 int num_ftes = 0;
18486737
EB
2043 int err;
2044
7445cfb1 2045 if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
b7826076 2046 !esw_check_ingress_prio_tag_enabled(esw, vport))
7445cfb1
JL
2047 return 0;
2048
2049 esw_vport_cleanup_ingress_rules(esw, vport);
b7826076
PP
2050
2051 if (mlx5_eswitch_vport_match_metadata_enabled(esw))
2052 num_ftes++;
2053 if (esw_check_ingress_prio_tag_enabled(esw, vport))
2054 num_ftes++;
2055
2056 err = esw_vport_create_ingress_acl_table(esw, vport, num_ftes);
7445cfb1
JL
2057 if (err) {
2058 esw_warn(esw->dev,
2059 "failed to enable ingress acl (%d) on vport[%d]\n",
2060 err, vport->vport);
2061 return err;
2062 }
2063
10652f39
PP
2064 err = esw_vport_create_ingress_acl_group(esw, vport);
2065 if (err)
2066 goto group_err;
2067
7445cfb1
JL
2068 esw_debug(esw->dev,
2069 "vport[%d] configure ingress rules\n", vport->vport);
2070
2071 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
2072 err = esw_vport_add_ingress_acl_modify_metadata(esw, vport);
2073 if (err)
10652f39 2074 goto metadata_err;
7445cfb1
JL
2075 }
2076
b7826076 2077 if (esw_check_ingress_prio_tag_enabled(esw, vport)) {
786ef904 2078 err = esw_vport_ingress_prio_tag_config(esw, vport);
18486737 2079 if (err)
10652f39 2080 goto prio_tag_err;
7445cfb1 2081 }
10652f39 2082 return 0;
7445cfb1 2083
10652f39
PP
2084prio_tag_err:
2085 esw_vport_del_ingress_acl_modify_metadata(esw, vport);
2086metadata_err:
10652f39
PP
2087 esw_vport_destroy_ingress_acl_group(vport);
2088group_err:
2089 esw_vport_destroy_ingress_acl_table(vport);
7445cfb1
JL
2090 return err;
2091}
2092
6d94e610
VP
2093static int esw_vport_egress_config(struct mlx5_eswitch *esw,
2094 struct mlx5_vport *vport)
2095{
2096 int err;
2097
2098 if (!MLX5_CAP_GEN(esw->dev, prio_tag_required))
2099 return 0;
2100
2101 esw_vport_cleanup_egress_rules(esw, vport);
2102
2103 err = esw_vport_enable_egress_acl(esw, vport);
2104 if (err)
2105 return err;
2106
fdde49e0
PP
2107 /* For prio tag mode, there is only 1 FTE:
2108 * 1) prio tag packets - pop the prio tag VLAN, allow
2109 * Unmatched traffic is allowed by default
2110 */
2111 esw_debug(esw->dev,
2112 "vport[%d] configure prio tag egress rules\n", vport->vport);
6d94e610 2113
fdde49e0
PP
2114 /* prio tag vlan rule - pop it so VF receives untagged packets */
2115 err = mlx5_esw_create_vport_egress_acl_vlan(esw, vport, 0,
2116 MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
2117 MLX5_FLOW_CONTEXT_ACTION_ALLOW);
7445cfb1 2118 if (err)
6d94e610
VP
2119 esw_vport_disable_egress_acl(esw, vport);
2120
7445cfb1
JL
2121 return err;
2122}
2123
92ab1eb3
JL
2124static bool
2125esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
2126{
2127 if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
2128 return false;
2129
2130 if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
2131 MLX5_FDB_TO_VPORT_REG_C_0))
2132 return false;
2133
2134 if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
2135 return false;
2136
2137 if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
2138 mlx5_ecpf_vport_exists(esw->dev))
2139 return false;
2140
2141 return true;
2142}
2143
1e62e222
MD
2144static bool
2145esw_check_vport_match_metadata_mandatory(const struct mlx5_eswitch *esw)
2146{
2147 return mlx5_core_mp_enabled(esw->dev);
2148}
2149
2150static bool esw_use_vport_metadata(const struct mlx5_eswitch *esw)
2151{
2152 return esw_check_vport_match_metadata_mandatory(esw) &&
2153 esw_check_vport_match_metadata_supported(esw);
2154}
2155
748da30b 2156int
89a0f1fb
PP
2157esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
2158 struct mlx5_vport *vport)
7445cfb1 2159{
7445cfb1
JL
2160 int err;
2161
89a0f1fb
PP
2162 err = esw_vport_ingress_config(esw, vport);
2163 if (err)
2164 return err;
7445cfb1 2165
89a0f1fb
PP
2166 if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
2167 err = esw_vport_egress_config(esw, vport);
a962d7a6 2168 if (err) {
10652f39 2169 esw_vport_cleanup_ingress_rules(esw, vport);
b7826076
PP
2170 esw_vport_del_ingress_acl_modify_metadata(esw, vport);
2171 esw_vport_destroy_ingress_acl_group(vport);
10652f39 2172 esw_vport_destroy_ingress_acl_table(vport);
7445cfb1 2173 }
18486737 2174 }
89a0f1fb
PP
2175 return err;
2176}
18486737 2177
748da30b 2178void
89a0f1fb
PP
2179esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
2180 struct mlx5_vport *vport)
2181{
2182 esw_vport_disable_egress_acl(esw, vport);
10652f39 2183 esw_vport_cleanup_ingress_rules(esw, vport);
b7826076 2184 esw_vport_del_ingress_acl_modify_metadata(esw, vport);
10652f39
PP
2185 esw_vport_destroy_ingress_acl_group(vport);
2186 esw_vport_destroy_ingress_acl_table(vport);
89a0f1fb 2187}
7445cfb1 2188
748da30b 2189static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
7445cfb1
JL
2190{
2191 struct mlx5_vport *vport;
7445cfb1 2192 int err;
18486737 2193
1e62e222 2194 if (esw_use_vport_metadata(esw))
92ab1eb3 2195 esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
18486737 2196
748da30b
VP
2197 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
2198 err = esw_vport_create_offloads_acl_tables(esw, vport);
2199 if (err)
2200 esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
18486737
EB
2201 return err;
2202}
2203
748da30b 2204static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
18486737 2205{
786ef904 2206 struct mlx5_vport *vport;
7445cfb1 2207
748da30b
VP
2208 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
2209 esw_vport_destroy_offloads_acl_tables(esw, vport);
7445cfb1 2210 esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
18486737
EB
2211}
2212
062f4bf4 2213static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
6ed1803a 2214{
062f4bf4
BW
2215 int num_vfs = esw->esw_funcs.num_vfs;
2216 int total_vports;
6ed1803a
MB
2217 int err;
2218
062f4bf4
BW
2219 if (mlx5_core_is_ecpf_esw_manager(esw->dev))
2220 total_vports = esw->total_vports;
2221 else
2222 total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev);
2223
5c1d260e 2224 memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
e52c2802 2225
748da30b 2226 err = esw_create_uplink_offloads_acl_tables(esw);
7445cfb1
JL
2227 if (err)
2228 return err;
18486737 2229
11b717d6 2230 err = esw_create_offloads_table(esw, total_vports);
c930a3ad 2231 if (err)
11b717d6 2232 goto create_offloads_err;
c930a3ad 2233
11b717d6 2234 err = esw_create_restore_table(esw);
c930a3ad 2235 if (err)
11b717d6
PB
2236 goto create_restore_err;
2237
2238 err = esw_create_offloads_fdb_tables(esw, total_vports);
2239 if (err)
2240 goto create_fdb_err;
c930a3ad 2241
062f4bf4 2242 err = esw_create_vport_rx_group(esw, total_vports);
c930a3ad
OG
2243 if (err)
2244 goto create_fg_err;
2245
96e32687
EC
2246 mutex_init(&esw->fdb_table.offloads.vports.lock);
2247 hash_init(esw->fdb_table.offloads.vports.table);
2248
c930a3ad
OG
2249 return 0;
2250
2251create_fg_err:
1967ce6e 2252 esw_destroy_offloads_fdb_tables(esw);
7445cfb1 2253create_fdb_err:
11b717d6
PB
2254 esw_destroy_restore_table(esw);
2255create_restore_err:
2256 esw_destroy_offloads_table(esw);
2257create_offloads_err:
748da30b 2258 esw_destroy_uplink_offloads_acl_tables(esw);
7445cfb1 2259
c930a3ad
OG
2260 return err;
2261}
2262
eca8cc38
BW
2263static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
2264{
96e32687 2265 mutex_destroy(&esw->fdb_table.offloads.vports.lock);
eca8cc38 2266 esw_destroy_vport_rx_group(esw);
eca8cc38 2267 esw_destroy_offloads_fdb_tables(esw);
11b717d6
PB
2268 esw_destroy_restore_table(esw);
2269 esw_destroy_offloads_table(esw);
748da30b 2270 esw_destroy_uplink_offloads_acl_tables(esw);
eca8cc38
BW
2271}
2272
7e736f9a
PP
2273static void
2274esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
a3888f33 2275{
5ccf2770 2276 bool host_pf_disabled;
7e736f9a 2277 u16 new_num_vfs;
a3888f33 2278
7e736f9a
PP
2279 new_num_vfs = MLX5_GET(query_esw_functions_out, out,
2280 host_params_context.host_num_of_vfs);
5ccf2770
BW
2281 host_pf_disabled = MLX5_GET(query_esw_functions_out, out,
2282 host_params_context.host_pf_disabled);
a3888f33 2283
7e736f9a
PP
2284 if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
2285 return;
a3888f33
BW
2286
2287 /* Number of VFs can only change from "0 to x" or "x to 0". */
cd56f929 2288 if (esw->esw_funcs.num_vfs > 0) {
23bb50cf 2289 mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
a3888f33 2290 } else {
7e736f9a 2291 int err;
a3888f33 2292
23bb50cf
BW
2293 err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs,
2294 MLX5_VPORT_UC_ADDR_CHANGE);
a3888f33 2295 if (err)
7e736f9a 2296 return;
a3888f33 2297 }
7e736f9a 2298 esw->esw_funcs.num_vfs = new_num_vfs;
a3888f33
BW
2299}
2300
7e736f9a 2301static void esw_functions_changed_event_handler(struct work_struct *work)
ac35dcd6 2302{
7e736f9a
PP
2303 struct mlx5_host_work *host_work;
2304 struct mlx5_eswitch *esw;
dd28087c 2305 const u32 *out;
ac35dcd6 2306
7e736f9a
PP
2307 host_work = container_of(work, struct mlx5_host_work, work);
2308 esw = host_work->esw;
a3888f33 2309
dd28087c
PP
2310 out = mlx5_esw_query_functions(esw->dev);
2311 if (IS_ERR(out))
7e736f9a 2312 goto out;
a3888f33 2313
7e736f9a 2314 esw_vfs_changed_event_handler(esw, out);
dd28087c 2315 kvfree(out);
a3888f33 2316out:
ac35dcd6
VP
2317 kfree(host_work);
2318}
2319
16fff98a 2320int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data)
a3888f33 2321{
cd56f929 2322 struct mlx5_esw_functions *esw_funcs;
a3888f33 2323 struct mlx5_host_work *host_work;
a3888f33
BW
2324 struct mlx5_eswitch *esw;
2325
2326 host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
2327 if (!host_work)
2328 return NOTIFY_DONE;
2329
cd56f929
VP
2330 esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
2331 esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);
a3888f33
BW
2332
2333 host_work->esw = esw;
2334
062f4bf4 2335 INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
a3888f33
BW
2336 queue_work(esw->work_queue, &host_work->work);
2337
2338 return NOTIFY_OK;
2339}
2340
5896b972 2341int esw_offloads_enable(struct mlx5_eswitch *esw)
eca8cc38 2342{
3b83b6c2
DL
2343 struct mlx5_vport *vport;
2344 int err, i;
eca8cc38 2345
9a64144d
MG
2346 if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
2347 MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
2348 esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
2349 else
2350 esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
2351
2bb72e7e 2352 mutex_init(&esw->offloads.termtbl_mutex);
8463daf1 2353 mlx5_rdma_enable_roce(esw->dev);
eca8cc38 2354
332bd3a5
PP
2355 err = esw_set_passing_vport_metadata(esw, true);
2356 if (err)
2357 goto err_vport_metadata;
c1286050 2358
7983a675
PB
2359 err = esw_offloads_steering_init(esw);
2360 if (err)
2361 goto err_steering_init;
2362
3b83b6c2
DL
2363 /* Representors will control the vport link state */
2364 mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
2365 vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
2366
c2d7712c
BW
2367 /* Uplink vport rep must load first. */
2368 err = esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK);
925a6acc 2369 if (err)
c2d7712c 2370 goto err_uplink;
c1286050 2371
c2d7712c 2372 err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
eca8cc38 2373 if (err)
c2d7712c 2374 goto err_vports;
eca8cc38
BW
2375
2376 esw_offloads_devcom_init(esw);
a3888f33 2377
eca8cc38
BW
2378 return 0;
2379
925a6acc 2380err_vports:
c2d7712c
BW
2381 esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
2382err_uplink:
332bd3a5 2383 esw_set_passing_vport_metadata(esw, false);
8463daf1 2384err_steering_init:
7983a675
PB
2385 esw_offloads_steering_cleanup(esw);
2386err_vport_metadata:
8463daf1 2387 mlx5_rdma_disable_roce(esw->dev);
2bb72e7e 2388 mutex_destroy(&esw->offloads.termtbl_mutex);
eca8cc38
BW
2389 return err;
2390}
2391
db7ff19e
EB
2392static int esw_offloads_stop(struct mlx5_eswitch *esw,
2393 struct netlink_ext_ack *extack)
c930a3ad 2394{
062f4bf4 2395 int err, err1;
c930a3ad 2396
383de108 2397 mlx5_eswitch_disable(esw, false);
062f4bf4 2398 err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
6c419ba8 2399 if (err) {
8c98ee77 2400 NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
062f4bf4 2401 err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
8c98ee77
EB
2402 if (err1) {
2403 NL_SET_ERR_MSG_MOD(extack,
2404 "Failed setting eswitch back to offloads");
2405 }
6c419ba8 2406 }
c930a3ad
OG
2407
2408 return err;
2409}
2410
5896b972 2411void esw_offloads_disable(struct mlx5_eswitch *esw)
c930a3ad 2412{
ac004b83 2413 esw_offloads_devcom_cleanup(esw);
5896b972 2414 mlx5_eswitch_disable_pf_vf_vports(esw);
c2d7712c 2415 esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
332bd3a5 2416 esw_set_passing_vport_metadata(esw, false);
eca8cc38 2417 esw_offloads_steering_cleanup(esw);
8463daf1 2418 mlx5_rdma_disable_roce(esw->dev);
2bb72e7e 2419 mutex_destroy(&esw->offloads.termtbl_mutex);
9a64144d 2420 esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
c930a3ad
OG
2421}
2422
ef78618b 2423static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
c930a3ad
OG
2424{
2425 switch (mode) {
2426 case DEVLINK_ESWITCH_MODE_LEGACY:
f6455de0 2427 *mlx5_mode = MLX5_ESWITCH_LEGACY;
c930a3ad
OG
2428 break;
2429 case DEVLINK_ESWITCH_MODE_SWITCHDEV:
f6455de0 2430 *mlx5_mode = MLX5_ESWITCH_OFFLOADS;
c930a3ad
OG
2431 break;
2432 default:
2433 return -EINVAL;
2434 }
2435
2436 return 0;
2437}
2438
ef78618b
OG
2439static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
2440{
2441 switch (mlx5_mode) {
f6455de0 2442 case MLX5_ESWITCH_LEGACY:
ef78618b
OG
2443 *mode = DEVLINK_ESWITCH_MODE_LEGACY;
2444 break;
f6455de0 2445 case MLX5_ESWITCH_OFFLOADS:
ef78618b
OG
2446 *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
2447 break;
2448 default:
2449 return -EINVAL;
2450 }
2451
2452 return 0;
2453}
2454
bffaa916
RD
2455static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
2456{
2457 switch (mode) {
2458 case DEVLINK_ESWITCH_INLINE_MODE_NONE:
2459 *mlx5_mode = MLX5_INLINE_MODE_NONE;
2460 break;
2461 case DEVLINK_ESWITCH_INLINE_MODE_LINK:
2462 *mlx5_mode = MLX5_INLINE_MODE_L2;
2463 break;
2464 case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
2465 *mlx5_mode = MLX5_INLINE_MODE_IP;
2466 break;
2467 case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
2468 *mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
2469 break;
2470 default:
2471 return -EINVAL;
2472 }
2473
2474 return 0;
2475}
2476
2477static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
2478{
2479 switch (mlx5_mode) {
2480 case MLX5_INLINE_MODE_NONE:
2481 *mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
2482 break;
2483 case MLX5_INLINE_MODE_L2:
2484 *mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
2485 break;
2486 case MLX5_INLINE_MODE_IP:
2487 *mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
2488 break;
2489 case MLX5_INLINE_MODE_TCP_UDP:
2490 *mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
2491 break;
2492 default:
2493 return -EINVAL;
2494 }
2495
2496 return 0;
2497}
2498
0e6fa491 2499static int mlx5_eswitch_check(const struct mlx5_core_dev *dev)
feae9087 2500{
9d1cef19
OG
2501 if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
2502 return -EOPNOTSUPP;
c930a3ad 2503
733d3e54
OG
2504 if (!MLX5_ESWITCH_MANAGER(dev))
2505 return -EPERM;
c930a3ad 2506
f6455de0 2507 if (dev->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
c96692fb 2508 !mlx5_core_is_ecpf_esw_manager(dev))
c930a3ad
OG
2509 return -EOPNOTSUPP;
2510
9d1cef19
OG
2511 return 0;
2512}
2513
db7ff19e
EB
2514int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
2515 struct netlink_ext_ack *extack)
9d1cef19
OG
2516{
2517 struct mlx5_core_dev *dev = devlink_priv(devlink);
2518 u16 cur_mlx5_mode, mlx5_mode = 0;
2519 int err;
2520
0e6fa491 2521 err = mlx5_eswitch_check(dev);
9d1cef19
OG
2522 if (err)
2523 return err;
2524
2525 cur_mlx5_mode = dev->priv.eswitch->mode;
2526
ef78618b 2527 if (esw_mode_from_devlink(mode, &mlx5_mode))
c930a3ad
OG
2528 return -EINVAL;
2529
2530 if (cur_mlx5_mode == mlx5_mode)
2531 return 0;
2532
2533 if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
db7ff19e 2534 return esw_offloads_start(dev->priv.eswitch, extack);
c930a3ad 2535 else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
db7ff19e 2536 return esw_offloads_stop(dev->priv.eswitch, extack);
c930a3ad
OG
2537 else
2538 return -EINVAL;
feae9087
OG
2539}
2540
2541int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
2542{
9d1cef19
OG
2543 struct mlx5_core_dev *dev = devlink_priv(devlink);
2544 int err;
c930a3ad 2545
0e6fa491 2546 err = mlx5_eswitch_check(dev);
9d1cef19
OG
2547 if (err)
2548 return err;
c930a3ad 2549
ef78618b 2550 return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
feae9087 2551}
127ea380 2552
db7ff19e
EB
2553int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
2554 struct netlink_ext_ack *extack)
bffaa916
RD
2555{
2556 struct mlx5_core_dev *dev = devlink_priv(devlink);
2557 struct mlx5_eswitch *esw = dev->priv.eswitch;
db68cc56 2558 int err, vport, num_vport;
bffaa916
RD
2559 u8 mlx5_mode;
2560
0e6fa491 2561 err = mlx5_eswitch_check(dev);
9d1cef19
OG
2562 if (err)
2563 return err;
bffaa916 2564
c415f704
OG
2565 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
2566 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
2567 if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
2568 return 0;
2569 /* fall through */
2570 case MLX5_CAP_INLINE_MODE_L2:
8c98ee77 2571 NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
bffaa916 2572 return -EOPNOTSUPP;
c415f704
OG
2573 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
2574 break;
2575 }
bffaa916 2576
525e84be 2577 if (atomic64_read(&esw->offloads.num_flows) > 0) {
8c98ee77
EB
2578 NL_SET_ERR_MSG_MOD(extack,
2579 "Can't set inline mode when flows are configured");
375f51e2
RD
2580 return -EOPNOTSUPP;
2581 }
2582
bffaa916
RD
2583 err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
2584 if (err)
2585 goto out;
2586
411ec9e0 2587 mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
bffaa916
RD
2588 err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
2589 if (err) {
8c98ee77
EB
2590 NL_SET_ERR_MSG_MOD(extack,
2591 "Failed to set min inline on vport");
bffaa916
RD
2592 goto revert_inline_mode;
2593 }
2594 }
2595
2596 esw->offloads.inline_mode = mlx5_mode;
2597 return 0;
2598
2599revert_inline_mode:
db68cc56 2600 num_vport = --vport;
411ec9e0 2601 mlx5_esw_for_each_host_func_vport_reverse(esw, vport, num_vport)
bffaa916
RD
2602 mlx5_modify_nic_vport_min_inline(dev,
2603 vport,
2604 esw->offloads.inline_mode);
2605out:
2606 return err;
2607}
2608
2609int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
2610{
2611 struct mlx5_core_dev *dev = devlink_priv(devlink);
2612 struct mlx5_eswitch *esw = dev->priv.eswitch;
9d1cef19 2613 int err;
bffaa916 2614
0e6fa491 2615 err = mlx5_eswitch_check(dev);
9d1cef19
OG
2616 if (err)
2617 return err;
bffaa916 2618
bffaa916
RD
2619 return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
2620}
2621
98fdbea5
LR
2622int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
2623 enum devlink_eswitch_encap_mode encap,
db7ff19e 2624 struct netlink_ext_ack *extack)
7768d197
RD
2625{
2626 struct mlx5_core_dev *dev = devlink_priv(devlink);
2627 struct mlx5_eswitch *esw = dev->priv.eswitch;
2628 int err;
2629
0e6fa491 2630 err = mlx5_eswitch_check(dev);
9d1cef19
OG
2631 if (err)
2632 return err;
7768d197
RD
2633
2634 if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
60786f09 2635 (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
7768d197
RD
2636 !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
2637 return -EOPNOTSUPP;
2638
2639 if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
2640 return -EOPNOTSUPP;
2641
f6455de0 2642 if (esw->mode == MLX5_ESWITCH_LEGACY) {
7768d197
RD
2643 esw->offloads.encap = encap;
2644 return 0;
2645 }
2646
2647 if (esw->offloads.encap == encap)
2648 return 0;
2649
525e84be 2650 if (atomic64_read(&esw->offloads.num_flows) > 0) {
8c98ee77
EB
2651 NL_SET_ERR_MSG_MOD(extack,
2652 "Can't set encapsulation when flows are configured");
7768d197
RD
2653 return -EOPNOTSUPP;
2654 }
2655
e52c2802 2656 esw_destroy_offloads_fdb_tables(esw);
7768d197
RD
2657
2658 esw->offloads.encap = encap;
e52c2802
PB
2659
2660 err = esw_create_offloads_fdb_tables(esw, esw->nvports);
2661
7768d197 2662 if (err) {
8c98ee77
EB
2663 NL_SET_ERR_MSG_MOD(extack,
2664 "Failed re-creating fast FDB table");
7768d197 2665 esw->offloads.encap = !encap;
e52c2802 2666 (void)esw_create_offloads_fdb_tables(esw, esw->nvports);
7768d197 2667 }
e52c2802 2668
7768d197
RD
2669 return err;
2670}
2671
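/* For reference (example PCI address, iproute2 syntax of this era):
 *
 *	devlink dev eswitch set pci/0000:06:00.0 encap enable
 *
 * The FDB is destroyed and re-created because reformat/decap support
 * can only be chosen at flow table creation time.
 */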
98fdbea5
LR
2672int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
2673 enum devlink_eswitch_encap_mode *encap)
7768d197
RD
2674{
2675 struct mlx5_core_dev *dev = devlink_priv(devlink);
2676 struct mlx5_eswitch *esw = dev->priv.eswitch;
9d1cef19 2677 int err;
7768d197 2678
0e6fa491 2679 err = mlx5_eswitch_check(dev);
9d1cef19
OG
2680 if (err)
2681 return err;
7768d197
RD
2682
2683 *encap = esw->offloads.encap;
2684 return 0;
2685}
2686
c2d7712c
BW
2687static bool
2688mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
2689{
2690 /* Currently, only ECPF based devices have a representor for the host PF. */
2691 if (vport_num == MLX5_VPORT_PF &&
2692 !mlx5_core_is_ecpf_esw_manager(esw->dev))
2693 return false;
2694
2695 if (vport_num == MLX5_VPORT_ECPF &&
2696 !mlx5_ecpf_vport_exists(esw->dev))
2697 return false;
2698
2699 return true;
2700}
2701
f8e8fa02 2702void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
8693115a 2703 const struct mlx5_eswitch_rep_ops *ops,
f8e8fa02 2704 u8 rep_type)
127ea380 2705{
8693115a 2706 struct mlx5_eswitch_rep_data *rep_data;
f8e8fa02
BW
2707 struct mlx5_eswitch_rep *rep;
2708 int i;
9deb2241 2709
8693115a 2710 esw->offloads.rep_ops[rep_type] = ops;
f8e8fa02 2711 mlx5_esw_for_all_reps(esw, i, rep) {
c2d7712c
BW
2712 if (likely(mlx5_eswitch_vport_has_rep(esw, i))) {
2713 rep_data = &rep->rep_data[rep_type];
2714 atomic_set(&rep_data->state, REP_REGISTERED);
2715 }
f8e8fa02 2716 }
127ea380 2717}
f8e8fa02 2718EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
127ea380 2719
f8e8fa02 2720void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
127ea380 2721{
cb67b832 2722 struct mlx5_eswitch_rep *rep;
f8e8fa02 2723 int i;
cb67b832 2724
f6455de0 2725 if (esw->mode == MLX5_ESWITCH_OFFLOADS)
062f4bf4 2726 __unload_reps_all_vport(esw, rep_type);
127ea380 2727
f8e8fa02 2728 mlx5_esw_for_all_reps(esw, i, rep)
8693115a 2729 atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
127ea380 2730}
f8e8fa02 2731EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
726293f1 2732
a4b97ab4 2733void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
726293f1 2734{
726293f1
HHZ
2735 struct mlx5_eswitch_rep *rep;
2736
879c8f84 2737 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
8693115a 2738 return rep->rep_data[rep_type].priv;
726293f1 2739}
22215908
MB
2740
2741void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
02f3afd9 2742 u16 vport,
22215908
MB
2743 u8 rep_type)
2744{
22215908
MB
2745 struct mlx5_eswitch_rep *rep;
2746
879c8f84 2747 rep = mlx5_eswitch_get_rep(esw, vport);
22215908 2748
8693115a
PP
2749 if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
2750 esw->offloads.rep_ops[rep_type]->get_proto_dev)
2751 return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
22215908
MB
2752 return NULL;
2753}
57cbd893 2754EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);
22215908
MB
2755
2756void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
2757{
879c8f84 2758 return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
22215908 2759}
57cbd893
MB
2760EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);
2761
2762struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
02f3afd9 2763 u16 vport)
57cbd893 2764{
879c8f84 2765 return mlx5_eswitch_get_rep(esw, vport);
57cbd893
MB
2766}
2767EXPORT_SYMBOL(mlx5_eswitch_vport_rep);
91d6291c
PP
2768
2769bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
2770{
2771 return vport_num >= MLX5_VPORT_FIRST_VF &&
2772 vport_num <= esw->dev->priv.sriov.max_vfs;
2773}
7445cfb1 2774
5b7cb745
PB
2775bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
2776{
2777 return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED);
2778}
2779EXPORT_SYMBOL(mlx5_eswitch_reg_c1_loopback_enabled);
2780
7445cfb1
JL
2781bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
2782{
2783 return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
2784}
2785EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);
2786
0f0d3827 2787u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
7445cfb1
JL
2788 u16 vport_num)
2789{
0f0d3827
PB
2790 u32 vport_num_mask = GENMASK(ESW_VPORT_BITS - 1, 0);
2791 u32 vhca_id_mask = GENMASK(ESW_VHCA_ID_BITS - 1, 0);
2792 u32 vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
2793 u32 val;
2794
2795 /* Make sure the vhca_id fits in ESW_VHCA_ID_BITS */
2796 WARN_ON_ONCE(vhca_id >= BIT(ESW_VHCA_ID_BITS));
2797
2798 /* Trim vhca_id to ESW_VHCA_ID_BITS */
2799 vhca_id &= vhca_id_mask;
2800
2801 /* Make sure pf and ecpf map to end of ESW_VPORT_BITS range so they
2802 * don't overlap with VF numbers, or with each other, after trimming.
2803 */
2804 WARN_ON_ONCE((MLX5_VPORT_UPLINK & vport_num_mask) <
2805 vport_num_mask - 1);
2806 WARN_ON_ONCE((MLX5_VPORT_ECPF & vport_num_mask) <
2807 vport_num_mask - 1);
2808 WARN_ON_ONCE((MLX5_VPORT_UPLINK & vport_num_mask) ==
2809 (MLX5_VPORT_ECPF & vport_num_mask));
2810
2811 /* Make sure that the VF vport_num fits in ESW_VPORT_BITS and doesn't
2812 * overlap with pf and ecpf.
2813 */
2814 if (vport_num != MLX5_VPORT_UPLINK &&
2815 vport_num != MLX5_VPORT_ECPF)
2816 WARN_ON_ONCE(vport_num >= vport_num_mask - 1);
2817
2818 /* We can now trim vport_num to ESW_VPORT_BITS */
2819 vport_num &= vport_num_mask;
2820
2821 val = (vhca_id << ESW_VPORT_BITS) | vport_num;
2822 return val << (32 - ESW_SOURCE_PORT_METADATA_BITS);
7445cfb1
JL
2823}
2824EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
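/* Worked example (illustrative; assumes the 8-bit vhca_id / 8-bit
 * vport split of this era, i.e. ESW_SOURCE_PORT_METADATA_BITS == 16):
 * for vhca_id 0x25 and VF vport 2,
 *
 *	val = (0x25 << ESW_VPORT_BITS) | 2 = 0x2502
 *	return 0x2502 << (32 - 16)     = 0x25020000
 *
 * i.e. the source port match value occupies the top 16 bits of REG_C_0.
 */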