/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "eswitch_offloads_chains.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
#include "lib/eq.h"

/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)
#define UPLINK_REP_INDEX 0

/* Per vport tables */

#define MLX5_ESW_VPORT_TABLE_SIZE 128

/* This struct is used as a key to the hash table and we need it to be packed
 * so hash result is consistent
 */
struct mlx5_vport_key {
	u32 chain;
	u16 prio;
	u16 vport;
	u16 vhca_id;
} __packed;

struct mlx5_vport_table {
	struct hlist_node hlist;
	struct mlx5_flow_table *fdb;
	u32 num_rules;
	struct mlx5_vport_key key;
};

#define MLX5_ESW_VPORT_TBL_NUM_GROUPS  4

static struct mlx5_flow_table *
esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *fdb;

	ft_attr.autogroup.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS;
	ft_attr.max_fte = MLX5_ESW_VPORT_TABLE_SIZE;
	ft_attr.prio = FDB_PER_VPORT;
	fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
	if (IS_ERR(fdb)) {
		esw_warn(esw->dev, "Failed to create per vport FDB Table err %ld\n",
			 PTR_ERR(fdb));
	}

	return fdb;
}

static u32 flow_attr_to_vport_key(struct mlx5_eswitch *esw,
				  struct mlx5_esw_flow_attr *attr,
				  struct mlx5_vport_key *key)
{
	key->vport = attr->in_rep->vport;
	key->chain = attr->chain;
	key->prio = attr->prio;
	key->vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
	return jhash(key, sizeof(*key), 0);
}

/* caller must hold vports.lock */
static struct mlx5_vport_table *
esw_vport_tbl_lookup(struct mlx5_eswitch *esw, struct mlx5_vport_key *skey, u32 key)
{
	struct mlx5_vport_table *e;

	hash_for_each_possible(esw->fdb_table.offloads.vports.table, e, hlist, key)
		if (!memcmp(&e->key, skey, sizeof(*skey)))
			return e;

	return NULL;
}

static void
esw_vport_tbl_put(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_vport_table *e;
	struct mlx5_vport_key key;
	u32 hkey;

	mutex_lock(&esw->fdb_table.offloads.vports.lock);
	hkey = flow_attr_to_vport_key(esw, attr, &key);
	e = esw_vport_tbl_lookup(esw, &key, hkey);
	if (!e || --e->num_rules)
		goto out;

	hash_del(&e->hlist);
	mlx5_destroy_flow_table(e->fdb);
	kfree(e);
out:
	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
}

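/* Look up the per-vport FDB table for the (chain, prio, vport, vhca_id) key
 * and take a reference, creating the table on first use. Callers pair this
 * with esw_vport_tbl_put() above, which drops the reference and destroys the
 * table once its last rule is gone.
 */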
static struct mlx5_flow_table *
esw_vport_tbl_get(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *fdb;
	struct mlx5_vport_table *e;
	struct mlx5_vport_key skey;
	u32 hkey;

	mutex_lock(&esw->fdb_table.offloads.vports.lock);
	hkey = flow_attr_to_vport_key(esw, attr, &skey);
	e = esw_vport_tbl_lookup(esw, &skey, hkey);
	if (e) {
		e->num_rules++;
		goto out;
	}

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e) {
		fdb = ERR_PTR(-ENOMEM);
		goto err_alloc;
	}

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!ns) {
		esw_warn(dev, "Failed to get FDB namespace\n");
		fdb = ERR_PTR(-ENOENT);
		goto err_ns;
	}

	fdb = esw_vport_tbl_create(esw, ns);
	if (IS_ERR(fdb))
		goto err_ns;

	e->fdb = fdb;
	e->num_rules = 1;
	e->key = skey;
	hash_add(esw->fdb_table.offloads.vports.table, &e->hlist, hkey);
out:
	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
	return e->fdb;

err_ns:
	kfree(e);
err_alloc:
	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
	return fdb;
}

int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_flow_attr attr = {};
	struct mlx5_eswitch_rep rep = {};
	struct mlx5_flow_table *fdb;
	struct mlx5_vport *vport;
	int i;

	attr.prio = 1;
	attr.in_rep = &rep;
	mlx5_esw_for_all_vports(esw, i, vport) {
		attr.in_rep->vport = vport->vport;
		fdb = esw_vport_tbl_get(esw, &attr);
		if (IS_ERR(fdb))
			goto out;
	}
	return 0;

out:
	mlx5_esw_vport_tbl_put(esw);
	return PTR_ERR(fdb);
}

void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_flow_attr attr = {};
	struct mlx5_eswitch_rep rep = {};
	struct mlx5_vport *vport;
	int i;

	attr.prio = 1;
	attr.in_rep = &rep;
	mlx5_esw_for_all_vports(esw, i, vport) {
		attr.in_rep->vport = vport->vport;
		esw_vport_tbl_put(esw, &attr);
	}
}

/* End: Per vport tables */

static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
						     u16 vport_num)
{
	int idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);

	WARN_ON(idx > esw->total_vports - 1);
	return &esw->offloads.vport_reps[idx];
}

static bool
esw_check_ingress_prio_tag_enabled(const struct mlx5_eswitch *esw,
				   const struct mlx5_vport *vport)
{
	return (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
		mlx5_eswitch_is_vf_vport(esw, vport->vport));
}

static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_esw_flow_attr *attr)
{
	void *misc2;
	void *misc;

	/* Use metadata matching because vport is not represented by single
	 * VHCA in dual-port RoCE mode, and matching on source vport may fail.
	 */
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(attr->in_mdev->priv.eswitch,
								   attr->in_rep->vport));

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		if (memchr_inv(misc, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc)))
			spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id,
				 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET_TO_ONES(fte_match_set_misc, misc,
					 source_eswitch_owner_vhca_id);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	}

	if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
	    attr->in_rep->vport == MLX5_VPORT_UPLINK)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
}

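/* Build a single FDB rule from the match spec and eswitch attributes:
 * assemble the destination list (forward table for chain jumps, vports with
 * optional packet reformat for forwarding, a flow counter), pick the FDB to
 * install into (per-vport table for the split case, chain/prio table
 * otherwise) and add the rule, going through a termination table when the
 * device requires one.
 */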
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	bool split = !!(attr->split_count);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *fdb;
	int j, i = 0;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	flow_act.action = attr->action;
	/* if per flow vlan pop/push is emulated, don't set that into the firmware */
	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
				     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
		flow_act.vlan[0].vid = attr->vlan_vid[0];
		flow_act.vlan[0].prio = attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
			flow_act.vlan[1].vid = attr->vlan_vid[1];
			flow_act.vlan[1].prio = attr->vlan_prio[1];
		}
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		struct mlx5_flow_table *ft;

		if (attr->dest_ft) {
			flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[i].ft = attr->dest_ft;
			i++;
		} else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
			flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[i].ft = mlx5_esw_chains_get_tc_end_ft(esw);
			i++;
		} else if (attr->dest_chain) {
			flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
			ft = mlx5_esw_chains_get_table(esw, attr->dest_chain,
						       1, 0);
			if (IS_ERR(ft)) {
				rule = ERR_CAST(ft);
				goto err_create_goto_table;
			}

			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[i].ft = ft;
			i++;
		} else {
			for (j = attr->split_count; j < attr->out_count; j++) {
				dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
				dest[i].vport.num = attr->dests[j].rep->vport;
				dest[i].vport.vhca_id =
					MLX5_CAP_GEN(attr->dests[j].mdev, vhca_id);
				if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
					dest[i].vport.flags |=
						MLX5_FLOW_DEST_VPORT_VHCA_ID;
				if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
					flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
					flow_act.pkt_reformat = attr->dests[j].pkt_reformat;
					dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
					dest[i].vport.pkt_reformat =
						attr->dests[j].pkt_reformat;
				}
				i++;
			}
		}
	}
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter_id = mlx5_fc_id(attr->counter);
		i++;
	}

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	if (attr->inner_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

	if (split) {
		fdb = esw_vport_tbl_get(esw, attr);
	} else {
		if (attr->chain || attr->prio)
			fdb = mlx5_esw_chains_get_table(esw, attr->chain,
							attr->prio, 0);
		else
			fdb = attr->fdb;

		if (!(attr->flags & MLX5_ESW_ATTR_FLAG_NO_IN_PORT))
			mlx5_eswitch_set_rule_source_port(esw, spec, attr);
	}
	if (IS_ERR(fdb)) {
		rule = ERR_CAST(fdb);
		goto err_esw_get;
	}

	if (mlx5_eswitch_termtbl_required(esw, &flow_act, spec))
		rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr,
						     &flow_act, dest, i);
	else
		rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		atomic64_inc(&esw->offloads.num_flows);

	return rule;

err_add_rule:
	if (split)
		esw_vport_tbl_put(esw, attr);
	else if (attr->chain || attr->prio)
		mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
err_esw_get:
	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) && attr->dest_chain)
		mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
err_create_goto_table:
	return rule;
}

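/* First half of a "split" rule: forward to the first attr->split_count
 * destinations and then jump to the per-vport table, where the second half
 * of the rule (added via mlx5_eswitch_add_offloaded_rule() with
 * attr->split_count set) continues processing.
 */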
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *fast_fdb;
	struct mlx5_flow_table *fwd_fdb;
	struct mlx5_flow_handle *rule;
	int i;

	fast_fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio, 0);
	if (IS_ERR(fast_fdb)) {
		rule = ERR_CAST(fast_fdb);
		goto err_get_fast;
	}

	fwd_fdb = esw_vport_tbl_get(esw, attr);
	if (IS_ERR(fwd_fdb)) {
		rule = ERR_CAST(fwd_fdb);
		goto err_get_fwd;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < attr->split_count; i++) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport.num = attr->dests[i].rep->vport;
		dest[i].vport.vhca_id =
			MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
		if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
			dest[i].vport.pkt_reformat = attr->dests[i].pkt_reformat;
		}
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = fwd_fdb;
	i++;

	mlx5_eswitch_set_rule_source_port(esw, spec, attr);

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);

	if (IS_ERR(rule))
		goto add_err;

	atomic64_inc(&esw->offloads.num_flows);

	return rule;
add_err:
	esw_vport_tbl_put(esw, attr);
err_get_fwd:
	mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
err_get_fast:
	return rule;
}

static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
			struct mlx5_flow_handle *rule,
			struct mlx5_esw_flow_attr *attr,
			bool fwd_rule)
{
	bool split = (attr->split_count > 0);
	int i;

	mlx5_del_flow_rules(rule);

	/* unref the term table */
	for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
		if (attr->dests[i].termtbl)
			mlx5_eswitch_termtbl_put(esw, attr->dests[i].termtbl);
	}

	atomic64_dec(&esw->offloads.num_flows);

	if (fwd_rule) {
		esw_vport_tbl_put(esw, attr);
		mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
	} else {
		if (split)
			esw_vport_tbl_put(esw, attr);
		else if (attr->chain || attr->prio)
			mlx5_esw_chains_put_table(esw, attr->chain, attr->prio,
						  0);
		if (attr->dest_chain)
			mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
	}
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, false);
}

void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, true);
}

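/* VLAN push/pop emulation: when the device cannot apply per-flow vlan
 * actions in the FDB, emulate them with per-vport vlan insert/strip
 * configuration, reference counted across the offloaded rules that need it.
 */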
static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int i, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	mlx5_esw_for_each_host_func_rep(esw, i, rep, esw->esw_funcs.num_vfs) {
		if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push && in_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules w.o it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}

int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
		 !attr->dest_chain);

	mutex_lock(&esw->state_lock);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		goto unlock;

	attr->flags &= ~MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
			vport->vlan_refcount++;
			attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
		}

		goto unlock;
	}

	if (!push && !pop)
		goto unlock;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan_vid[0];
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_VLAN_HANDLED))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	mutex_lock(&esw->state_lock);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
			vport->vlan_refcount--;

		goto out;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		goto out;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	mutex_unlock(&esw->state_lock);
	return err;
}

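/* Rules matching on (source_sqn, source_port == eswitch manager) catch
 * packets that the eswitch manager sends on a representor's send queue and
 * forward them directly to the represented vport, bypassing the rest of the
 * FDB pipeline.
 */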
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, u16 vport,
				    u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	/* source vport is the esw manager */
	MLX5_SET(fte_match_set_misc, misc, source_port, esw->manager_vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
{
	return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	       MLX5_FDB_TO_VPORT_REG_C_1;
}

static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
	u8 curr, wanted;
	int err;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw) &&
	    !mlx5_eswitch_vport_match_metadata_enabled(esw))
		return 0;

	err = mlx5_eswitch_query_esw_vport_context(esw->dev, 0, false,
						   out, sizeof(out));
	if (err)
		return err;

	curr = MLX5_GET(query_esw_vport_context_out, out,
			esw_vport_context.fdb_to_vport_reg_c_id);
	wanted = MLX5_FDB_TO_VPORT_REG_C_0;
	if (mlx5_eswitch_reg_c1_loopback_supported(esw))
		wanted |= MLX5_FDB_TO_VPORT_REG_C_1;

	if (enable)
		curr |= wanted;
	else
		curr &= ~wanted;

	MLX5_SET(modify_esw_vport_context_in, in,
		 esw_vport_context.fdb_to_vport_reg_c_id, curr);

	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.fdb_to_vport_reg_c_id, 1);

	err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, in,
						    sizeof(in));
	if (!err) {
		if (enable && (curr & MLX5_FDB_TO_VPORT_REG_C_1))
			esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
		else
			esw->flags &= ~MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
	}

	return err;
}

static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
				  struct mlx5_core_dev *peer_dev,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_destination *dest)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

		MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(peer_dev, vhca_id));

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);
	}

	dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest->vport.num = peer_dev->priv.eswitch->manager_vport;
	dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
	dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}

static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
					       struct mlx5_eswitch *peer_esw,
					       struct mlx5_flow_spec *spec,
					       u16 vport)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(peer_esw,
								   vport));
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);
	}
}

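/* Peer miss rules: on a merged eswitch (dual-port device), traffic that
 * entered through one of the peer device's vports and missed in this FDB is
 * forwarded to the peer's eswitch manager vport, so the peer side can
 * deliver it. One rule is installed per peer vport (PF, ECPF and each VF).
 */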
static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
				       struct mlx5_core_dev *peer_dev)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle **flows;
	struct mlx5_flow_handle *flow;
	struct mlx5_flow_spec *spec;
	/* total vports is the same for both e-switches */
	int nvports = esw->total_vports;
	void *misc;
	int err, i;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	peer_miss_rules_setup(esw, peer_dev, spec, &dest);

	flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
	if (!flows) {
		err = -ENOMEM;
		goto alloc_flows_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
						   spec, MLX5_VPORT_PF);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_pf_flow_err;
		}
		flows[MLX5_VPORT_PF] = flow;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_ecpf_flow_err;
		}
		flows[mlx5_eswitch_ecpf_idx(esw)] = flow;
	}

	mlx5_esw_for_each_vf_vport_num(esw, i, mlx5_core_max_vfs(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw,
						   peer_dev->priv.eswitch,
						   spec, i);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_vf_flow_err;
		}
		flows[i] = flow;
	}

	esw->fdb_table.offloads.peer_miss_rules = flows;

	kvfree(spec);
	return 0;

add_vf_flow_err:
	nvports = --i;
	mlx5_esw_for_each_vf_vport_num_reverse(esw, i, nvports)
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
add_ecpf_flow_err:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);
add_pf_flow_err:
	esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
	kvfree(flows);
alloc_flows_err:
	kvfree(spec);
	return err;
}

static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows;
	int i;

	flows = esw->fdb_table.offloads.peer_miss_rules;

	mlx5_esw_for_each_vf_vport_num_reverse(esw, i,
					       mlx5_core_max_vfs(esw->dev))
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);

	kvfree(flows);
}

static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = esw->manager_vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}

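/* A restore rule matches the chain tag that was written into reg_c_0 before
 * a packet went to the slow path, re-attaches that tag as the flow tag,
 * applies the reg_c_1 -> reg_b copy header (allocated in
 * esw_create_restore_table() below) and forwards to the offloads table.
 */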
struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *ft = esw->offloads.ft_offloads_restore;
	struct mlx5_flow_context *flow_context;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_destination dest;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
		 ESW_CHAIN_TAG_METADATA_MASK);
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag);
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	flow_act.modify_hdr = esw->offloads.restore_copy_hdr_id;

	flow_context = &spec->flow_context;
	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = tag;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = esw->offloads.ft_offloads;

	flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	kfree(spec);

	if (IS_ERR(flow_rule))
		esw_warn(esw->dev,
			 "Failed to create restore rule for tag: %d, err(%d)\n",
			 tag, (int)PTR_ERR(flow_rule));

	return flow_rule;
}

u32
esw_get_max_restore_tag(struct mlx5_eswitch *esw)
{
	return ESW_CHAIN_TAG_METADATA_MASK;
}

#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
					   u32 *flow_group_in)
{
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    flow_group_in,
					    match_criteria);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS_2);

		MLX5_SET(fte_match_param, match_criteria,
			 misc_parameters_2.metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());
	} else {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_port);
	}
}

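/* The slow path FDB is sized for one send-to-vport rule per SQ
 * (nvports * MAX_SQ_NVPORTS + MAX_PF_SQ entries), one peer miss rule per
 * vport (esw->total_vports) and the two match-all miss flows
 * (MLX5_ESW_MISS_FLOWS); the flow group boundaries below follow the same
 * layout.
 */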
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	u32 flags = 0, *flow_group_in;
	int table_size, ix, err = 0;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u8 *dmac;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}
	esw->fdb_table.offloads.ns = root_ns;
	err = mlx5_flow_namespace_set_mode(root_ns,
					   esw->dev->priv.steering->mode);
	if (err) {
		esw_warn(dev, "Failed to set FDB namespace steering mode\n");
		goto ns_err;
	}

	table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ +
		     MLX5_ESW_MISS_FLOWS + esw->total_vports;

	/* create the slow path fdb with encap set, so further table instances
	 * can be created at run time while VFs are probed if the FW allows that.
	 */
	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	ft_attr.flags = flags;
	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

	err = mlx5_esw_chains_create(esw);
	if (err) {
		esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
		goto fdb_chains_err;
	}

	/* create send-to-vport group */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create peer esw miss group */
	memset(flow_group_in, 0, inlen);

	esw_set_flow_group_source_port(esw, flow_group_in);

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					      flow_group_in,
					      match_criteria);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_eswitch_owner_vhca_id);

		MLX5_SET(create_flow_group_in, flow_group_in,
			 source_eswitch_owner_vhca_id_valid, 1);
	}

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + esw->total_vports - 1);
	ix += esw->total_vports;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
		goto peer_miss_err;
	}
	esw->fdb_table.offloads.peer_miss_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + MLX5_ESW_MISS_FLOWS);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	esw->nvports = nvports;
	kvfree(flow_group_in);
	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	mlx5_esw_chains_destroy(esw);
fdb_chains_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS);
ns_err:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.offloads.slow_fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_esw_chains_destroy(esw);
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
				     MLX5_FLOW_STEERING_MODE_DMFS);
}

static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = nvports + MLX5_ESW_MISS_FLOWS;
	ft_attr.prio = 1;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}

static int esw_create_vport_rx_group(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int err = 0;

	nvports = nvports + MLX5_ESW_MISS_FLOWS;
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	esw_set_flow_group_source_port(esw, flow_group_in);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(esw, vport));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}

static int mlx5_eswitch_inline_mode_get(const struct mlx5_eswitch *esw, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	int vport;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == MLX5_ESWITCH_NONE)
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}

query_vports:
	mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
	mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
		if (prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}

static void esw_destroy_restore_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id);
	mlx5_destroy_flow_group(offloads->restore_group);
	mlx5_destroy_flow_table(offloads->ft_offloads_restore);
}

static int esw_create_restore_table(struct mlx5_eswitch *esw)
{
	u8 modact[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {};
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *ns;
	struct mlx5_modify_hdr *mod_hdr;
	void *match_criteria, *misc;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in) {
		err = -ENOMEM;
		goto out_free;
	}

	ft_attr.max_fte = 1 << ESW_CHAIN_TAG_METADATA_BITS;
	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		esw_warn(esw->dev, "Failed to create restore table, err %d\n",
			 err);
		goto out_free;
	}

	memset(flow_group_in, 0, inlen);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    misc_parameters_2);

	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
		 ESW_CHAIN_TAG_METADATA_MASK);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ft_attr.max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS_2);
	g = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create restore flow group, err: %d\n",
			 err);
		goto err_group;
	}

	MLX5_SET(copy_action_in, modact, action_type, MLX5_ACTION_TYPE_COPY);
	MLX5_SET(copy_action_in, modact, src_field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
	MLX5_SET(copy_action_in, modact, dst_field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
	mod_hdr = mlx5_modify_header_alloc(esw->dev,
					   MLX5_FLOW_NAMESPACE_KERNEL, 1,
					   modact);
	if (IS_ERR(mod_hdr)) {
		err = PTR_ERR(mod_hdr);
		esw_warn(dev, "Failed to create restore mod header, err: %d\n",
			 err);
		goto err_mod_hdr;
	}

	esw->offloads.ft_offloads_restore = ft;
	esw->offloads.restore_group = g;
	esw->offloads.restore_copy_hdr_id = mod_hdr;

	return 0;

err_mod_hdr:
	mlx5_destroy_flow_group(g);
err_group:
	mlx5_destroy_flow_table(ft);
out_free:
	kvfree(flow_group_in);

	return err;
}

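/* devlink mode switch to "switchdev": disable the eswitch in legacy mode and
 * re-enable it in offloads mode, falling back to legacy if that fails.
 */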
static int esw_offloads_start(struct mlx5_eswitch *esw,
			      struct netlink_ext_ack *extack)
{
	int err, err1;

	if (esw->mode != MLX5_ESWITCH_LEGACY &&
	    !mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set offloads mode, SRIOV legacy not enabled");
		return -EINVAL;
	}

	mlx5_eswitch_disable(esw, false);
	mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs);
	err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed setting eswitch to offloads");
		err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to legacy");
		}
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			NL_SET_ERR_MSG_MOD(extack,
					   "Inline mode is different between vports");
		}
	}
	return err;
}

void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	kfree(esw->offloads.vport_reps);
}

int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	int total_vports = esw->total_vports;
	struct mlx5_eswitch_rep *rep;
	int vport_index;
	u8 rep_type;

	esw->offloads.vport_reps = kcalloc(total_vports,
					   sizeof(struct mlx5_eswitch_rep),
					   GFP_KERNEL);
	if (!esw->offloads.vport_reps)
		return -ENOMEM;

	mlx5_esw_for_all_reps(esw, vport_index, rep) {
		rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport_index);
		rep->vport_index = vport_index;

		for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
			atomic_set(&rep->rep_data[rep_type].state,
				   REP_UNREGISTERED);
	}

	return 0;
}

c9b99abc
BW
1629static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
1630 struct mlx5_eswitch_rep *rep, u8 rep_type)
1631{
8693115a 1632 if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
6f4e0219 1633 REP_LOADED, REP_REGISTERED) == REP_LOADED)
8693115a 1634 esw->offloads.rep_ops[rep_type]->unload(rep);
c9b99abc
BW
1635}
1636
4110fc59 1637static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
6ed1803a
MB
1638{
1639 struct mlx5_eswitch_rep *rep;
4110fc59
BW
1640 int i;
1641
1642 mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, esw->esw_funcs.num_vfs)
1643 __esw_offloads_unload_rep(esw, rep, rep_type);
c9b99abc 1644
81cd229c
BW
1645 if (mlx5_ecpf_vport_exists(esw->dev)) {
1646 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
1647 __esw_offloads_unload_rep(esw, rep, rep_type);
1648 }
1649
1650 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
1651 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
1652 __esw_offloads_unload_rep(esw, rep, rep_type);
1653 }
1654
879c8f84 1655 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
c9b99abc 1656 __esw_offloads_unload_rep(esw, rep, rep_type);
6ed1803a
MB
1657}
1658
c2d7712c 1659int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
a4b97ab4 1660{
c2d7712c
BW
1661 struct mlx5_eswitch_rep *rep;
1662 int rep_type;
a4b97ab4
MB
1663 int err;
1664
c2d7712c
BW
1665 if (esw->mode != MLX5_ESWITCH_OFFLOADS)
1666 return 0;
a4b97ab4 1667
c2d7712c
BW
1668 rep = mlx5_eswitch_get_rep(esw, vport_num);
1669 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
1670 if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
1671 REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
1672 err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
1673 if (err)
1674 goto err_reps;
1675 }
1676
1677 return 0;
a4b97ab4
MB
1678
1679err_reps:
c2d7712c
BW
1680 atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED);
1681 for (--rep_type; rep_type >= 0; rep_type--)
1682 __esw_offloads_unload_rep(esw, rep, rep_type);
6ed1803a
MB
1683 return err;
1684}
1685
c2d7712c
BW
1686void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
1687{
1688 struct mlx5_eswitch_rep *rep;
1689 int rep_type;
1690
1691 if (esw->mode != MLX5_ESWITCH_OFFLOADS)
1692 return;
1693
1694 rep = mlx5_eswitch_get_rep(esw, vport_num);
1695 for (rep_type = NUM_REP_TYPES - 1; rep_type >= 0; rep_type--)
1696 __esw_offloads_unload_rep(esw, rep, rep_type);
1697}
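
/*
 * Rep lifecycle, as enforced by the atomics above (illustrative summary,
 * not part of the original file):
 *
 *	REP_UNREGISTERED --register--------> REP_REGISTERED
 *	REP_REGISTERED ----ops->load()-----> REP_LOADED
 *	REP_LOADED --------ops->unload()---> REP_REGISTERED
 *
 * atomic_cmpxchg() makes each transition one-shot, so concurrent or
 * repeated load/unload attempts for the same rep type collapse into a
 * single ops->load()/ops->unload() call.
 */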

#define ESW_OFFLOADS_DEVCOM_PAIR	(0)
#define ESW_OFFLOADS_DEVCOM_UNPAIR	(1)

static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
				  struct mlx5_eswitch *peer_esw)
{
	int err;

	err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
	if (err)
		return err;

	return 0;
}

static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
{
	mlx5e_tc_clean_fdb_peer_flows(esw);
	esw_del_fdb_peer_miss_rules(esw);
}

static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
					 struct mlx5_eswitch *peer_esw,
					 bool pair)
{
	struct mlx5_flow_root_namespace *peer_ns;
	struct mlx5_flow_root_namespace *ns;
	int err;

	peer_ns = peer_esw->dev->priv.steering->fdb_root_ns;
	ns = esw->dev->priv.steering->fdb_root_ns;

	if (pair) {
		err = mlx5_flow_namespace_set_peer(ns, peer_ns);
		if (err)
			return err;

		err = mlx5_flow_namespace_set_peer(peer_ns, ns);
		if (err) {
			mlx5_flow_namespace_set_peer(ns, NULL);
			return err;
		}
	} else {
		mlx5_flow_namespace_set_peer(ns, NULL);
		mlx5_flow_namespace_set_peer(peer_ns, NULL);
	}

	return 0;
}

static int mlx5_esw_offloads_devcom_event(int event,
					  void *my_data,
					  void *event_data)
{
	struct mlx5_eswitch *esw = my_data;
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
	struct mlx5_eswitch *peer_esw = event_data;
	int err;

	switch (event) {
	case ESW_OFFLOADS_DEVCOM_PAIR:
		if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
		    mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
			break;

		err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
		if (err)
			goto err_out;
		err = mlx5_esw_offloads_pair(esw, peer_esw);
		if (err)
			goto err_peer;

		err = mlx5_esw_offloads_pair(peer_esw, esw);
		if (err)
			goto err_pair;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
		break;

	case ESW_OFFLOADS_DEVCOM_UNPAIR:
		if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
			break;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
		mlx5_esw_offloads_unpair(peer_esw);
		mlx5_esw_offloads_unpair(esw);
		mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
		break;
	}

	return 0;

err_pair:
	mlx5_esw_offloads_unpair(esw);
err_peer:
	mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
err_out:
	mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
		      event, err);
	return err;
}

static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	INIT_LIST_HEAD(&esw->offloads.peer_flows);
	mutex_init(&esw->offloads.peer_mutex);

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	mlx5_devcom_register_component(devcom,
				       MLX5_DEVCOM_ESW_OFFLOADS,
				       mlx5_esw_offloads_devcom_event,
				       esw);

	mlx5_devcom_send_event(devcom,
			       MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_PAIR, esw);
}

static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_UNPAIR, esw);

	mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}
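
/*
 * Pairing flow (illustrative summary, not part of the original file): when
 * both eswitches of a merged-eswitch device reach switchdev mode, the
 * ESW_OFFLOADS_DEVCOM_PAIR handler above wires the two FDB root namespaces
 * to each other and then installs peer miss rules in both directions:
 *
 *	set_ns_peer(esw, peer)  ->  pair(esw, peer)  ->  pair(peer, esw)
 *
 * ESW_OFFLOADS_DEVCOM_UNPAIR tears the same state down in reverse order,
 * and the handler bails out early unless both sides agree on whether
 * vport match metadata is enabled.
 */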

static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
					     struct mlx5_vport *vport)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_spec *spec;
	int err = 0;

	/* For prio tag mode, there is only one FTE:
	 * 1) Untagged packets - push prio tag VLAN and modify metadata if
	 *    required, allow
	 * Unmatched traffic is allowed by default
	 */
	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* Untagged packets - push prio tag VLAN, allow */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 0);
	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
			  MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	flow_act.vlan[0].ethtype = ETH_P_8021Q;
	flow_act.vlan[0].vid = 0;
	flow_act.vlan[0].prio = 0;

	if (vport->ingress.offloads.modify_metadata_rule) {
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
		flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
	}

	vport->ingress.allow_rule =
		mlx5_add_flow_rules(vport->ingress.acl, spec,
				    &flow_act, NULL, 0);
	if (IS_ERR(vport->ingress.allow_rule)) {
		err = PTR_ERR(vport->ingress.allow_rule);
		esw_warn(esw->dev,
			 "vport[%d] configure ingress untagged allow rule, err(%d)\n",
			 vport->vport, err);
		vport->ingress.allow_rule = NULL;
	}

	kvfree(spec);
	return err;
}
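
/*
 * Example (illustrative, not part of the original file): with prio tag
 * mode enabled, an untagged frame received from the VF,
 *
 *	dmac | smac | ethertype | payload
 *
 * leaves this ingress ACL as a priority-tagged frame, i.e. carrying an
 * 802.1Q tag whose VID is 0 and which conveys only the PCP bits:
 *
 *	dmac | smac | 0x8100 | prio=0, vid=0 | ethertype | payload
 *
 * matching the flow_act.vlan[0] = {ETH_P_8021Q, vid 0, prio 0} push
 * programmed above.
 */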

static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
						     struct mlx5_vport *vport)
{
	u8 action[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {};
	struct mlx5_flow_act flow_act = {};
	int err = 0;
	u32 key;

	key = mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport);
	key >>= ESW_SOURCE_PORT_METADATA_OFFSET;

	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, action, field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
	MLX5_SET(set_action_in, action, data, key);
	MLX5_SET(set_action_in, action, offset,
		 ESW_SOURCE_PORT_METADATA_OFFSET);
	MLX5_SET(set_action_in, action, length,
		 ESW_SOURCE_PORT_METADATA_BITS);

	vport->ingress.offloads.modify_metadata =
		mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
					 1, action);
	if (IS_ERR(vport->ingress.offloads.modify_metadata)) {
		err = PTR_ERR(vport->ingress.offloads.modify_metadata);
		esw_warn(esw->dev,
			 "failed to alloc modify header for vport %d ingress acl (%d)\n",
			 vport->vport, err);
		return err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
	vport->ingress.offloads.modify_metadata_rule =
		mlx5_add_flow_rules(vport->ingress.acl,
				    NULL, &flow_act, NULL, 0);
	if (IS_ERR(vport->ingress.offloads.modify_metadata_rule)) {
		err = PTR_ERR(vport->ingress.offloads.modify_metadata_rule);
		esw_warn(esw->dev,
			 "failed to add setting metadata rule for vport %d ingress acl, err(%d)\n",
			 vport->vport, err);
		mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
		vport->ingress.offloads.modify_metadata_rule = NULL;
	}
	return err;
}
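
/*
 * Worked example (illustrative, not part of the original file):
 * mlx5_eswitch_get_vport_metadata_for_match() returns the source-port key
 * already shifted to the top of the 32-bit register, so shifting it right
 * by ESW_SOURCE_PORT_METADATA_OFFSET recovers the raw field value. The SET
 * action above then writes that value back at the same offset/length in
 * reg_c_0, which is exactly what FDB rules later match on. E.g., assuming
 * a 16-bit offset, a match value of 0x20050000 gives key == 0x2005, and
 * packets leave this ACL with reg_c_0 == 0x20050000.
 */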

static void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
						      struct mlx5_vport *vport)
{
	if (vport->ingress.offloads.modify_metadata_rule) {
		mlx5_del_flow_rules(vport->ingress.offloads.modify_metadata_rule);
		mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);

		vport->ingress.offloads.modify_metadata_rule = NULL;
	}
}

static int esw_vport_create_ingress_acl_group(struct mlx5_eswitch *esw,
					      struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;
	u32 *flow_group_in;
	u32 flow_index = 0;
	int ret = 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	if (esw_check_ingress_prio_tag_enabled(esw, vport)) {
		/* This group holds the FTE that matches untagged packets
		 * when prio_tag is enabled.
		 */
		memset(flow_group_in, 0, inlen);

		match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					      flow_group_in, match_criteria);
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
		MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);

		g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
		if (IS_ERR(g)) {
			ret = PTR_ERR(g);
			esw_warn(esw->dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
				 vport->vport, ret);
			goto prio_tag_err;
		}
		vport->ingress.offloads.metadata_prio_tag_grp = g;
		flow_index++;
	}

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		/* This group holds an FTE with no match criteria that adds
		 * metadata: for tagged packets if prio-tag is enabled (as a
		 * fallthrough), or for all traffic if prio-tag is disabled.
		 */
		memset(flow_group_in, 0, inlen);
		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);

		g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
		if (IS_ERR(g)) {
			ret = PTR_ERR(g);
			esw_warn(esw->dev, "vport[%d] ingress create drop flow group, err(%d)\n",
				 vport->vport, ret);
			goto metadata_err;
		}
		vport->ingress.offloads.metadata_allmatch_grp = g;
	}

	kvfree(flow_group_in);
	return 0;

metadata_err:
	if (!IS_ERR_OR_NULL(vport->ingress.offloads.metadata_prio_tag_grp)) {
		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
		vport->ingress.offloads.metadata_prio_tag_grp = NULL;
	}
prio_tag_err:
	kvfree(flow_group_in);
	return ret;
}

static void esw_vport_destroy_ingress_acl_group(struct mlx5_vport *vport)
{
	if (vport->ingress.offloads.metadata_allmatch_grp) {
		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_allmatch_grp);
		vport->ingress.offloads.metadata_allmatch_grp = NULL;
	}

	if (vport->ingress.offloads.metadata_prio_tag_grp) {
		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
		vport->ingress.offloads.metadata_prio_tag_grp = NULL;
	}
}

static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport)
{
	int num_ftes = 0;
	int err;

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
	    !esw_check_ingress_prio_tag_enabled(esw, vport))
		return 0;

	esw_vport_cleanup_ingress_rules(esw, vport);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw))
		num_ftes++;
	if (esw_check_ingress_prio_tag_enabled(esw, vport))
		num_ftes++;

	err = esw_vport_create_ingress_acl_table(esw, vport, num_ftes);
	if (err) {
		esw_warn(esw->dev,
			 "failed to enable ingress acl (%d) on vport[%d]\n",
			 err, vport->vport);
		return err;
	}

	err = esw_vport_create_ingress_acl_group(esw, vport);
	if (err)
		goto group_err;

	esw_debug(esw->dev,
		  "vport[%d] configure ingress rules\n", vport->vport);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		err = esw_vport_add_ingress_acl_modify_metadata(esw, vport);
		if (err)
			goto metadata_err;
	}

	if (esw_check_ingress_prio_tag_enabled(esw, vport)) {
		err = esw_vport_ingress_prio_tag_config(esw, vport);
		if (err)
			goto prio_tag_err;
	}
	return 0;

prio_tag_err:
	esw_vport_del_ingress_acl_modify_metadata(esw, vport);
metadata_err:
	esw_vport_destroy_ingress_acl_group(vport);
group_err:
	esw_vport_destroy_ingress_acl_table(vport);
	return err;
}

static int esw_vport_egress_config(struct mlx5_eswitch *esw,
				   struct mlx5_vport *vport)
{
	int err;

	if (!MLX5_CAP_GEN(esw->dev, prio_tag_required))
		return 0;

	esw_vport_cleanup_egress_rules(esw, vport);

	err = esw_vport_enable_egress_acl(esw, vport);
	if (err)
		return err;

	/* For prio tag mode, there is only one FTE:
	 * 1) prio tag packets - pop the prio tag VLAN, allow
	 * Unmatched traffic is allowed by default
	 */
	esw_debug(esw->dev,
		  "vport[%d] configure prio tag egress rules\n", vport->vport);

	/* prio tag vlan rule - pop it so VF receives untagged packets */
	err = mlx5_esw_create_vport_egress_acl_vlan(esw, vport, 0,
						    MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
						    MLX5_FLOW_CONTEXT_ACTION_ALLOW);
	if (err)
		esw_vport_disable_egress_acl(esw, vport);

	return err;
}

static bool
esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
{
	if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
		return false;

	if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	      MLX5_FDB_TO_VPORT_REG_C_0))
		return false;

	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
		return false;

	if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
	    mlx5_ecpf_vport_exists(esw->dev))
		return false;

	return true;
}

static bool
esw_check_vport_match_metadata_mandatory(const struct mlx5_eswitch *esw)
{
	return mlx5_core_mp_enabled(esw->dev);
}

static bool esw_use_vport_metadata(const struct mlx5_eswitch *esw)
{
	return esw_check_vport_match_metadata_mandatory(esw) &&
	       esw_check_vport_match_metadata_supported(esw);
}

int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport)
{
	int err;

	err = esw_vport_ingress_config(esw, vport);
	if (err)
		return err;

	if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
		err = esw_vport_egress_config(esw, vport);
		if (err) {
			esw_vport_cleanup_ingress_rules(esw, vport);
			esw_vport_del_ingress_acl_modify_metadata(esw, vport);
			esw_vport_destroy_ingress_acl_group(vport);
			esw_vport_destroy_ingress_acl_table(vport);
		}
	}
	return err;
}

void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport)
{
	esw_vport_disable_egress_acl(esw, vport);
	esw_vport_cleanup_ingress_rules(esw, vport);
	esw_vport_del_ingress_acl_modify_metadata(esw, vport);
	esw_vport_destroy_ingress_acl_group(vport);
	esw_vport_destroy_ingress_acl_table(vport);
}

static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	int err;

	if (esw_use_vport_metadata(esw))
		esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;

	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
	err = esw_vport_create_offloads_acl_tables(esw, vport);
	if (err)
		esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
	return err;
}

static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
	esw_vport_destroy_offloads_acl_tables(esw, vport);
	esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
}

static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
{
	int num_vfs = esw->esw_funcs.num_vfs;
	int total_vports;
	int err;

	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		total_vports = esw->total_vports;
	else
		total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev);

	memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));

	err = esw_create_uplink_offloads_acl_tables(esw);
	if (err)
		return err;

	err = esw_create_offloads_table(esw, total_vports);
	if (err)
		goto create_offloads_err;

	err = esw_create_restore_table(esw);
	if (err)
		goto create_restore_err;

	err = esw_create_offloads_fdb_tables(esw, total_vports);
	if (err)
		goto create_fdb_err;

	err = esw_create_vport_rx_group(esw, total_vports);
	if (err)
		goto create_fg_err;

	mutex_init(&esw->fdb_table.offloads.vports.lock);
	hash_init(esw->fdb_table.offloads.vports.table);

	return 0;

create_fg_err:
	esw_destroy_offloads_fdb_tables(esw);
create_fdb_err:
	esw_destroy_restore_table(esw);
create_restore_err:
	esw_destroy_offloads_table(esw);
create_offloads_err:
	esw_destroy_uplink_offloads_acl_tables(esw);

	return err;
}

static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
{
	mutex_destroy(&esw->fdb_table.offloads.vports.lock);
	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_fdb_tables(esw);
	esw_destroy_restore_table(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_uplink_offloads_acl_tables(esw);
}

static void
esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
{
	bool host_pf_disabled;
	u16 new_num_vfs;

	new_num_vfs = MLX5_GET(query_esw_functions_out, out,
			       host_params_context.host_num_of_vfs);
	host_pf_disabled = MLX5_GET(query_esw_functions_out, out,
				    host_params_context.host_pf_disabled);

	if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
		return;

	/* Number of VFs can only change from "0 to x" or "x to 0". */
	if (esw->esw_funcs.num_vfs > 0) {
		mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
	} else {
		int err;

		err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs,
						  MLX5_VPORT_UC_ADDR_CHANGE);
		if (err)
			return;
	}
	esw->esw_funcs.num_vfs = new_num_vfs;
}

static void esw_functions_changed_event_handler(struct work_struct *work)
{
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;
	const u32 *out;

	host_work = container_of(work, struct mlx5_host_work, work);
	esw = host_work->esw;

	out = mlx5_esw_query_functions(esw->dev);
	if (IS_ERR(out))
		goto out;

	esw_vfs_changed_event_handler(esw, out);
	kvfree(out);
out:
	kfree(host_work);
}

int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data)
{
	struct mlx5_esw_functions *esw_funcs;
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;

	host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
	if (!host_work)
		return NOTIFY_DONE;

	esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
	esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);

	host_work->esw = esw;

	INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
	queue_work(esw->work_queue, &host_work->work);

	return NOTIFY_OK;
}

int esw_offloads_enable(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	int err, i;

	if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
	    MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
	else
		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;

	mlx5_rdma_enable_roce(esw->dev);
	err = esw_offloads_steering_init(esw);
	if (err)
		goto err_steering_init;

	err = esw_set_passing_vport_metadata(esw, true);
	if (err)
		goto err_vport_metadata;

	/* Representor will control the vport link state */
	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;

	/* Uplink vport rep must load first. */
	err = esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK);
	if (err)
		goto err_uplink;

	err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
	if (err)
		goto err_vports;

	esw_offloads_devcom_init(esw);
	mutex_init(&esw->offloads.termtbl_mutex);

	return 0;

err_vports:
	esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
err_uplink:
	esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
	esw_offloads_steering_cleanup(esw);
err_steering_init:
	mlx5_rdma_disable_roce(esw->dev);
	return err;
}

static int esw_offloads_stop(struct mlx5_eswitch *esw,
			     struct netlink_ext_ack *extack)
{
	int err, err1;

	mlx5_eswitch_disable(esw, false);
	err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
		err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to offloads");
		}
	}

	return err;
}

void esw_offloads_disable(struct mlx5_eswitch *esw)
{
	esw_offloads_devcom_cleanup(esw);
	mlx5_eswitch_disable_pf_vf_vports(esw);
	esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
	esw_set_passing_vport_metadata(esw, false);
	esw_offloads_steering_cleanup(esw);
	mlx5_rdma_disable_roce(esw->dev);
	esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}

static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = MLX5_ESWITCH_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = MLX5_ESWITCH_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case MLX5_ESWITCH_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case MLX5_ESWITCH_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
		*mlx5_mode = MLX5_INLINE_MODE_NONE;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
		*mlx5_mode = MLX5_INLINE_MODE_L2;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
		*mlx5_mode = MLX5_INLINE_MODE_IP;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
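
/*
 * Mapping note (illustrative, not part of the original file): the devlink
 * inline mode names describe how much of the packet the driver must copy
 * ("inline") into the Tx descriptor so the NIC can parse it: "link"
 * inlines up to the L2 headers (MLX5_INLINE_MODE_L2), "network" up to the
 * IP headers (MLX5_INLINE_MODE_IP) and "transport" up to the TCP/UDP
 * headers (MLX5_INLINE_MODE_TCP_UDP).
 */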

static int mlx5_devlink_eswitch_check(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;

	if (!MLX5_ESWITCH_MANAGER(dev))
		return -EPERM;

	if (dev->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
	    !mlx5_core_is_ecpf_esw_manager(dev))
		return -EOPNOTSUPP;

	return 0;
}

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	u16 cur_mlx5_mode, mlx5_mode = 0;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	cur_mlx5_mode = dev->priv.eswitch->mode;

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	if (cur_mlx5_mode == mlx5_mode)
		return 0;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return esw_offloads_start(dev->priv.eswitch, extack);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return esw_offloads_stop(dev->priv.eswitch, extack);
	else
		return -EINVAL;
}
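
/*
 * Example (illustrative, not part of the original file): this entry point
 * backs the devlink eswitch API, so switching a port between the two modes
 * from userspace looks like
 *
 *	devlink dev eswitch set pci/0000:03:00.0 mode switchdev
 *	devlink dev eswitch show pci/0000:03:00.0
 *
 * (the PCI address is hypothetical).
 */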

int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}

int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err, vport, num_vport;
	u8 mlx5_mode;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
			return 0;
		/* fall through */
	case MLX5_CAP_INLINE_MODE_L2:
		NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
		return -EOPNOTSUPP;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		break;
	}

	if (atomic64_read(&esw->offloads.num_flows) > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set inline mode when flows are configured");
		return -EOPNOTSUPP;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to set min inline on vport");
			goto revert_inline_mode;
		}
	}

	esw->offloads.inline_mode = mlx5_mode;
	return 0;

revert_inline_mode:
	num_vport = --vport;
	mlx5_esw_for_each_host_func_vport_reverse(esw, vport, num_vport)
		mlx5_modify_nic_vport_min_inline(dev,
						 vport,
						 esw->offloads.inline_mode);
out:
	return err;
}

int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}

int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
		return -EOPNOTSUPP;

	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
		return -EOPNOTSUPP;

	if (esw->mode == MLX5_ESWITCH_LEGACY) {
		esw->offloads.encap = encap;
		return 0;
	}

	if (esw->offloads.encap == encap)
		return 0;

	if (atomic64_read(&esw->offloads.num_flows) > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set encapsulation when flows are configured");
		return -EOPNOTSUPP;
	}

	esw_destroy_offloads_fdb_tables(esw);

	esw->offloads.encap = encap;

	err = esw_create_offloads_fdb_tables(esw, esw->nvports);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed re-creating fast FDB table");
		esw->offloads.encap = !encap;
		(void)esw_create_offloads_fdb_tables(esw, esw->nvports);
	}

	return err;
}

int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
					enum devlink_eswitch_encap_mode *encap)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	*encap = esw->offloads.encap;
	return 0;
}

static bool
mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
{
	/* Currently, only ECPF based device has representor for host PF. */
	if (vport_num == MLX5_VPORT_PF &&
	    !mlx5_core_is_ecpf_esw_manager(esw->dev))
		return false;

	if (vport_num == MLX5_VPORT_ECPF &&
	    !mlx5_ecpf_vport_exists(esw->dev))
		return false;

	return true;
}

void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
				      const struct mlx5_eswitch_rep_ops *ops,
				      u8 rep_type)
{
	struct mlx5_eswitch_rep_data *rep_data;
	struct mlx5_eswitch_rep *rep;
	int i;

	esw->offloads.rep_ops[rep_type] = ops;
	mlx5_esw_for_all_reps(esw, i, rep) {
		if (likely(mlx5_eswitch_vport_has_rep(esw, i))) {
			rep_data = &rep->rep_data[rep_type];
			atomic_set(&rep_data->state, REP_REGISTERED);
		}
	}
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);

void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int i;

	if (esw->mode == MLX5_ESWITCH_OFFLOADS)
		__unload_reps_all_vport(esw, rep_type);

	mlx5_esw_for_all_reps(esw, i, rep)
		atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);

void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	return rep->rep_data[rep_type].priv;
}

void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
				 u16 vport,
				 u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, vport);

	if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
	    esw->offloads.rep_ops[rep_type]->get_proto_dev)
		return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
	return NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);

void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
{
	return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
}
EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);

struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
						u16 vport)
{
	return mlx5_eswitch_get_rep(esw, vport);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);

bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
	return vport_num >= MLX5_VPORT_FIRST_VF &&
	       vport_num <= esw->dev->priv.sriov.max_vfs;
}

bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
{
	return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED);
}
EXPORT_SYMBOL(mlx5_eswitch_reg_c1_loopback_enabled);

bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
{
	return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);

u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
					      u16 vport_num)
{
	u32 vport_num_mask = GENMASK(ESW_VPORT_BITS - 1, 0);
	u32 vhca_id_mask = GENMASK(ESW_VHCA_ID_BITS - 1, 0);
	u32 vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
	u32 val;

	/* Make sure the vhca_id fits the ESW_VHCA_ID_BITS */
	WARN_ON_ONCE(vhca_id >= BIT(ESW_VHCA_ID_BITS));

	/* Trim vhca_id to ESW_VHCA_ID_BITS */
	vhca_id &= vhca_id_mask;

	/* Make sure pf and ecpf map to end of ESW_VPORT_BITS range so they
	 * don't overlap with VF numbers, and themselves, after trimming.
	 */
	WARN_ON_ONCE((MLX5_VPORT_UPLINK & vport_num_mask) <
		     vport_num_mask - 1);
	WARN_ON_ONCE((MLX5_VPORT_ECPF & vport_num_mask) <
		     vport_num_mask - 1);
	WARN_ON_ONCE((MLX5_VPORT_UPLINK & vport_num_mask) ==
		     (MLX5_VPORT_ECPF & vport_num_mask));

	/* Make sure the VF vport_num fits ESW_VPORT_BITS and doesn't
	 * overlap with pf and ecpf.
	 */
	if (vport_num != MLX5_VPORT_UPLINK &&
	    vport_num != MLX5_VPORT_ECPF)
		WARN_ON_ONCE(vport_num >= vport_num_mask - 1);

	/* We can now trim vport_num to ESW_VPORT_BITS */
	vport_num &= vport_num_mask;

	val = (vhca_id << ESW_VPORT_BITS) | vport_num;
	return val << (32 - ESW_SOURCE_PORT_METADATA_BITS);
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
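
/*
 * Worked example (illustrative, not part of the original file, and
 * assuming the 4-bit vhca_id / 12-bit vport split, i.e.
 * ESW_SOURCE_PORT_METADATA_BITS == 16): for vhca_id 2 and vport 5,
 *
 *	val = (0x2 << 12) | 0x5 = 0x2005
 *	return 0x2005 << (32 - 16) = 0x20050000
 *
 * i.e. the source-port key occupies the upper bits of reg_c_0, leaving
 * the lower bits free for other uses such as chain restore.
 */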