/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/idr.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "esw/indir_table.h"
#include "esw/acl/ofld.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
#include "lib/eq.h"
#include "lib/fs_chains.h"
#include "en_tc.h"

/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)
#define UPLINK_REP_INDEX 0

/* Per vport tables */

#define MLX5_ESW_VPORT_TABLE_SIZE 128

/* This struct is used as a key to the hash table and we need it to be packed
 * so hash result is consistent
 */
struct mlx5_vport_key {
	u32 chain;
	u16 prio;
	u16 vport;
	u16 vhca_id;
} __packed;
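
/* Note: flow_attr_to_vport_key() below hashes this struct as raw bytes with
 * jhash(key, sizeof(*key), 0). Without __packed, compiler-inserted padding
 * between the u32 and u16 members would be uninitialized, so two logically
 * equal keys could hash to different buckets.
 */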

struct mlx5_vport_tbl_attr {
	u16 chain;
	u16 prio;
	u16 vport;
};

struct mlx5_vport_table {
	struct hlist_node hlist;
	struct mlx5_flow_table *fdb;
	u32 num_rules;
	struct mlx5_vport_key key;
};

#define MLX5_ESW_VPORT_TBL_NUM_GROUPS  4

static struct mlx5_flow_table *
esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *fdb;

	ft_attr.autogroup.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS;
	ft_attr.max_fte = MLX5_ESW_VPORT_TABLE_SIZE;
	ft_attr.prio = FDB_PER_VPORT;
	fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
	if (IS_ERR(fdb)) {
		esw_warn(esw->dev, "Failed to create per vport FDB Table err %ld\n",
			 PTR_ERR(fdb));
	}

	return fdb;
}

static u32 flow_attr_to_vport_key(struct mlx5_eswitch *esw,
				  struct mlx5_vport_tbl_attr *attr,
				  struct mlx5_vport_key *key)
{
	key->vport = attr->vport;
	key->chain = attr->chain;
	key->prio = attr->prio;
	key->vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
	return jhash(key, sizeof(*key), 0);
}

/* caller must hold vports.lock */
static struct mlx5_vport_table *
esw_vport_tbl_lookup(struct mlx5_eswitch *esw, struct mlx5_vport_key *skey, u32 key)
{
	struct mlx5_vport_table *e;

	hash_for_each_possible(esw->fdb_table.offloads.vports.table, e, hlist, key)
		if (!memcmp(&e->key, skey, sizeof(*skey)))
			return e;

	return NULL;
}

static void
esw_vport_tbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr)
{
	struct mlx5_vport_table *e;
	struct mlx5_vport_key key;
	u32 hkey;

	mutex_lock(&esw->fdb_table.offloads.vports.lock);
	hkey = flow_attr_to_vport_key(esw, attr, &key);
	e = esw_vport_tbl_lookup(esw, &key, hkey);
	if (!e || --e->num_rules)
		goto out;

	hash_del(&e->hlist);
	mlx5_destroy_flow_table(e->fdb);
	kfree(e);
out:
	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
}

static struct mlx5_flow_table *
esw_vport_tbl_get(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *fdb;
	struct mlx5_vport_table *e;
	struct mlx5_vport_key skey;
	u32 hkey;

	mutex_lock(&esw->fdb_table.offloads.vports.lock);
	hkey = flow_attr_to_vport_key(esw, attr, &skey);
	e = esw_vport_tbl_lookup(esw, &skey, hkey);
	if (e) {
		e->num_rules++;
		goto out;
	}

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e) {
		fdb = ERR_PTR(-ENOMEM);
		goto err_alloc;
	}

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!ns) {
		esw_warn(dev, "Failed to get FDB namespace\n");
		fdb = ERR_PTR(-ENOENT);
		goto err_ns;
	}

	fdb = esw_vport_tbl_create(esw, ns);
	if (IS_ERR(fdb))
		goto err_ns;

	e->fdb = fdb;
	e->num_rules = 1;
	e->key = skey;
	hash_add(esw->fdb_table.offloads.vports.table, &e->hlist, hkey);
out:
	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
	return e->fdb;

err_ns:
	kfree(e);
err_alloc:
	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
	return fdb;
}
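
/* Sketch of the intended get/put pairing (illustrative only; vport_num is a
 * hypothetical caller-supplied value):
 *
 *	struct mlx5_vport_tbl_attr attr = { .chain = 0, .prio = 1,
 *					    .vport = vport_num };
 *	struct mlx5_flow_table *fdb = esw_vport_tbl_get(esw, &attr);
 *
 *	if (IS_ERR(fdb))
 *		return PTR_ERR(fdb);
 *	...add rules to fdb...
 *	esw_vport_tbl_put(esw, &attr);
 *
 * Each get either creates the per vport table or bumps num_rules; the
 * matching put drops the refcount and destroys the table when it hits zero.
 */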

int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw)
{
	struct mlx5_vport_tbl_attr attr;
	struct mlx5_flow_table *fdb;
	struct mlx5_vport *vport;
	int i;

	attr.chain = 0;
	attr.prio = 1;
	mlx5_esw_for_all_vports(esw, i, vport) {
		attr.vport = vport->vport;
		fdb = esw_vport_tbl_get(esw, &attr);
		if (IS_ERR(fdb))
			goto out;
	}
	return 0;

out:
	mlx5_esw_vport_tbl_put(esw);
	return PTR_ERR(fdb);
}

void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw)
{
	struct mlx5_vport_tbl_attr attr;
	struct mlx5_vport *vport;
	int i;

	attr.chain = 0;
	attr.prio = 1;
	mlx5_esw_for_all_vports(esw, i, vport) {
		attr.vport = vport->vport;
		esw_vport_tbl_put(esw, &attr);
	}
}

/* End: Per vport tables */

static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
						     u16 vport_num)
{
	int idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);

	WARN_ON(idx > esw->total_vports - 1);
	return &esw->offloads.vport_reps[idx];
}

static void
mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_esw_flow_attr *attr)
{
	if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
	    attr && attr->in_rep)
		spec->flow_context.flow_source =
			attr->in_rep->vport == MLX5_VPORT_UPLINK ?
				MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK :
				MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
}

static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_eswitch *src_esw,
				  u16 vport)
{
	void *misc2;
	void *misc;

	/* Use metadata matching because vport is not represented by single
	 * VHCA in dual-port RoCE mode, and matching on source vport may fail.
	 */
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(src_esw,
								   vport));

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id,
				 MLX5_CAP_GEN(src_esw->dev, vhca_id));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET_TO_ONES(fte_match_set_misc, misc,
					 source_eswitch_owner_vhca_id);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	}
}
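
/* In metadata mode the source port is carried in metadata_reg_c_0, so a rule
 * that should only match traffic from vport V matches, schematically:
 *
 *	misc_parameters_2.metadata_reg_c_0 ==
 *		mlx5_eswitch_get_vport_metadata_for_match(src_esw, V)
 *
 * while in the legacy mode it matches misc_parameters.source_port == V (plus
 * source_eswitch_owner_vhca_id when the eswitches are merged).
 */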

static void
esw_setup_ft_dest(struct mlx5_flow_destination *dest,
		  struct mlx5_flow_act *flow_act,
		  struct mlx5_flow_attr *attr,
		  int i)
{
	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = attr->dest_ft;
}

static void
esw_setup_slow_path_dest(struct mlx5_flow_destination *dest,
			 struct mlx5_flow_act *flow_act,
			 struct mlx5_fs_chains *chains,
			 int i)
{
	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
}

static int
esw_setup_chain_dest(struct mlx5_flow_destination *dest,
		     struct mlx5_flow_act *flow_act,
		     struct mlx5_fs_chains *chains,
		     u32 chain, u32 prio, u32 level,
		     int i)
{
	struct mlx5_flow_table *ft;

	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	ft = mlx5_chains_get_table(chains, chain, prio, level);
	if (IS_ERR(ft))
		return PTR_ERR(ft);

	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = ft;
	return 0;
}

static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr,
				     int from, int to)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	int i;

	for (i = from; i < to; i++)
		if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
			mlx5_chains_put_table(chains, 0, 1, 0);
}

static bool
esw_is_chain_src_port_rewrite(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr)
{
	int i;

	for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
		if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
			return true;
	return false;
}

static int
esw_setup_chain_src_port_rewrite(struct mlx5_flow_destination *dest,
				 struct mlx5_flow_act *flow_act,
				 struct mlx5_eswitch *esw,
				 struct mlx5_fs_chains *chains,
				 struct mlx5_flow_attr *attr,
				 int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	int j, err;

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE))
		return -EOPNOTSUPP;

	for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) {
		err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, 1, 0, *i);
		if (err)
			goto err_setup_chain;
		flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
		flow_act->pkt_reformat = esw_attr->dests[j].pkt_reformat;
	}
	return 0;

err_setup_chain:
	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, j);
	return err;
}

static void esw_cleanup_chain_src_port_rewrite(struct mlx5_eswitch *esw,
					       struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
}

static void
esw_cleanup_chain_dest(struct mlx5_fs_chains *chains, u32 chain, u32 prio, u32 level)
{
	mlx5_chains_put_table(chains, chain, prio, level);
}

static void
esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
		     struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
		     int attr_idx, int dest_idx, bool pkt_reformat)
{
	dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport;
	dest[dest_idx].vport.vhca_id =
		MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
	if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP) {
		if (pkt_reformat) {
			flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
			flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
		}
		dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
		dest[dest_idx].vport.pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
	}
}

static int
esw_setup_vport_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
		      struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
		      int i)
{
	int j;

	for (j = esw_attr->split_count; j < esw_attr->out_count; j++, i++)
		esw_setup_vport_dest(dest, flow_act, esw, esw_attr, j, i, true);
	return i;
}

static int
esw_setup_dests(struct mlx5_flow_destination *dest,
		struct mlx5_flow_act *flow_act,
		struct mlx5_eswitch *esw,
		struct mlx5_flow_attr *attr,
		struct mlx5_flow_spec *spec,
		int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	int err = 0;

	if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) &&
	    MLX5_CAP_GEN(esw_attr->in_mdev, reg_c_preserve) &&
	    mlx5_eswitch_vport_match_metadata_enabled(esw))
		attr->flags |= MLX5_ESW_ATTR_FLAG_SRC_REWRITE;

	if (attr->dest_ft) {
		esw_setup_ft_dest(dest, flow_act, attr, *i);
		(*i)++;
	} else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
		esw_setup_slow_path_dest(dest, flow_act, chains, *i);
		(*i)++;
	} else if (attr->dest_chain) {
		err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,
					   1, 0, *i);
		(*i)++;
	} else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) {
		err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i);
	} else {
		*i = esw_setup_vport_dests(dest, flow_act, esw, esw_attr, *i);
	}

	return err;
}
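
/* Destination selection above is first-match: an explicit dest_ft wins, then
 * the slow path table, then a goto-chain destination, then chain destinations
 * that need a source port rewrite, and only otherwise is the plain vport
 * destination list used.
 */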

static void
esw_cleanup_dests(struct mlx5_eswitch *esw,
		  struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) {
		if (attr->dest_chain)
			esw_cleanup_chain_dest(chains, attr->dest_chain, 1, 0);
		else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
			esw_cleanup_chain_src_port_rewrite(esw, attr);
	}
}

struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	bool split = !!(esw_attr->split_count);
	struct mlx5_vport_tbl_attr fwd_attr;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *fdb;
	int i = 0;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	flow_act.action = attr->action;
	/* if per flow vlan pop/push is emulated, don't set that into the firmware */
	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
				     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(esw_attr->vlan_proto[0]);
		flow_act.vlan[0].vid = esw_attr->vlan_vid[0];
		flow_act.vlan[0].prio = esw_attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(esw_attr->vlan_proto[1]);
			flow_act.vlan[1].vid = esw_attr->vlan_vid[1];
			flow_act.vlan[1].prio = esw_attr->vlan_prio[1];
		}
	}

	mlx5_eswitch_set_rule_flow_source(esw, spec, esw_attr);

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int err;

		err = esw_setup_dests(dest, &flow_act, esw, attr, spec, &i);
		if (err) {
			rule = ERR_PTR(err);
			goto err_create_goto_table;
		}
	}

	if (esw_attr->decap_pkt_reformat)
		flow_act.pkt_reformat = esw_attr->decap_pkt_reformat;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter_id = mlx5_fc_id(attr->counter);
		i++;
	}

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	if (attr->inner_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

	if (split) {
		fwd_attr.chain = attr->chain;
		fwd_attr.prio = attr->prio;
		fwd_attr.vport = esw_attr->in_rep->vport;

		fdb = esw_vport_tbl_get(esw, &fwd_attr);
	} else {
		if (attr->chain || attr->prio)
			fdb = mlx5_chains_get_table(chains, attr->chain,
						    attr->prio, 0);
		else
			fdb = attr->ft;

		if (!(attr->flags & MLX5_ESW_ATTR_FLAG_NO_IN_PORT))
			mlx5_eswitch_set_rule_source_port(esw, spec,
							  esw_attr->in_mdev->priv.eswitch,
							  esw_attr->in_rep->vport);
	}
	if (IS_ERR(fdb)) {
		rule = ERR_CAST(fdb);
		goto err_esw_get;
	}

	if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
		rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, esw_attr,
						     &flow_act, dest, i);
	else
		rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		atomic64_inc(&esw->offloads.num_flows);

	return rule;

err_add_rule:
	if (split)
		esw_vport_tbl_put(esw, &fwd_attr);
	else if (attr->chain || attr->prio)
		mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_esw_get:
	esw_cleanup_dests(esw, attr);
err_create_goto_table:
	return rule;
}
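
/* Illustrative pairing (hypothetical caller): a rule returned here must be
 * released with the same attr via mlx5_eswitch_del_offloaded_rule(), e.g.:
 *
 *	rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
 *	if (IS_ERR(rule))
 *		return PTR_ERR(rule);
 *	...
 *	mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
 *
 * The attr is needed again on delete so the chain table and per vport table
 * references taken here can be dropped.
 */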

struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	struct mlx5_vport_tbl_attr fwd_attr;
	struct mlx5_flow_table *fast_fdb;
	struct mlx5_flow_table *fwd_fdb;
	struct mlx5_flow_handle *rule;
	int i, err = 0;

	fast_fdb = mlx5_chains_get_table(chains, attr->chain, attr->prio, 0);
	if (IS_ERR(fast_fdb)) {
		rule = ERR_CAST(fast_fdb);
		goto err_get_fast;
	}

	fwd_attr.chain = attr->chain;
	fwd_attr.prio = attr->prio;
	fwd_attr.vport = esw_attr->in_rep->vport;
	fwd_fdb = esw_vport_tbl_get(esw, &fwd_attr);
	if (IS_ERR(fwd_fdb)) {
		rule = ERR_CAST(fwd_fdb);
		goto err_get_fwd;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < esw_attr->split_count; i++) {
		if (esw_is_chain_src_port_rewrite(esw, esw_attr))
			err = esw_setup_chain_src_port_rewrite(dest, &flow_act, esw, chains, attr,
							       &i);
		else
			esw_setup_vport_dest(dest, &flow_act, esw, esw_attr, i, i, false);

		if (err) {
			rule = ERR_PTR(err);
			goto err_chain_src_rewrite;
		}
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = fwd_fdb;
	i++;

	mlx5_eswitch_set_rule_source_port(esw, spec,
					  esw_attr->in_mdev->priv.eswitch,
					  esw_attr->in_rep->vport);

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);

	if (IS_ERR(rule)) {
		i = esw_attr->split_count;
		goto err_chain_src_rewrite;
	}

	atomic64_inc(&esw->offloads.num_flows);

	return rule;
err_chain_src_rewrite:
	esw_put_dest_tables_loop(esw, attr, 0, i);
	esw_vport_tbl_put(esw, &fwd_attr);
err_get_fwd:
	mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_get_fast:
	return rule;
}

static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
			struct mlx5_flow_handle *rule,
			struct mlx5_flow_attr *attr,
			bool fwd_rule)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	bool split = (esw_attr->split_count > 0);
	struct mlx5_vport_tbl_attr fwd_attr;
	int i;

	mlx5_del_flow_rules(rule);

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) {
		/* unref the term table */
		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
			if (esw_attr->dests[i].termtbl)
				mlx5_eswitch_termtbl_put(esw, esw_attr->dests[i].termtbl);
		}
	}

	atomic64_dec(&esw->offloads.num_flows);

	if (fwd_rule || split) {
		fwd_attr.chain = attr->chain;
		fwd_attr.prio = attr->prio;
		fwd_attr.vport = esw_attr->in_rep->vport;
	}

	if (fwd_rule) {
		esw_vport_tbl_put(esw, &fwd_attr);
		mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
		esw_put_dest_tables_loop(esw, attr, 0, esw_attr->split_count);
	} else {
		if (split)
			esw_vport_tbl_put(esw, &fwd_attr);
		else if (attr->chain || attr->prio)
			mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
		esw_cleanup_dests(esw, attr);
	}
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, false);
}

void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, true);
}

static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int i, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	mlx5_esw_for_each_host_func_rep(esw, i, rep, esw->esw_funcs.num_vfs) {
		if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push && in_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules w.o it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}
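
/* Example of the push restriction above (illustrative): if a rule from VF
 * rep A was offloaded with a vlan push of VID 5, in_rep->vlan_refcount is
 * non-zero and in_rep->vlan == 5, so a second rule from A that tries to push
 * VID 6 fails the last check and returns -EOPNOTSUPP.
 */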

int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
		 !attr->dest_chain);

	mutex_lock(&esw->state_lock);

	err = esw_add_vlan_action_check(esw_attr, push, pop, fwd);
	if (err)
		goto unlock;

	attr->flags &= ~MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;

	vport = esw_vlan_action_get_vport(esw_attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
			vport->vlan_refcount++;
			attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
		}

		goto unlock;
	}

	if (!push && !pop)
		goto unlock;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, esw_attr->vlan_vid[0],
						    0, SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = esw_attr->vlan_vid[0];
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_VLAN_HANDLED))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	mutex_lock(&esw->state_lock);

	vport = esw_vlan_action_get_vport(esw_attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
			vport->vlan_refcount--;

		goto out;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		goto out;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	mutex_unlock(&esw->state_lock);
	return err;
}

struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, u16 vport,
				    u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	/* source vport is the esw manager */
	MLX5_SET(fte_match_set_misc, misc, source_port, esw->manager_vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);
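
/* Illustrative use (hypothetical values): a representor that opens send
 * queue sqn typically installs a rule so traffic the esw manager transmits
 * on that SQ is forwarded to the vport it represents:
 *
 *	flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw, vport_num, sqn);
 *	if (IS_ERR(flow_rule))
 *		return PTR_ERR(flow_rule);
 *	...
 *	mlx5_eswitch_del_send_to_vport_rule(flow_rule);
 */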

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
{
	return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	       MLX5_FDB_TO_VPORT_REG_C_1;
}

static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	u32 min[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
	u8 curr, wanted;
	int err;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw) &&
	    !mlx5_eswitch_vport_match_metadata_enabled(esw))
		return 0;

	MLX5_SET(query_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
	err = mlx5_cmd_exec_inout(esw->dev, query_esw_vport_context, in, out);
	if (err)
		return err;

	curr = MLX5_GET(query_esw_vport_context_out, out,
			esw_vport_context.fdb_to_vport_reg_c_id);
	wanted = MLX5_FDB_TO_VPORT_REG_C_0;
	if (mlx5_eswitch_reg_c1_loopback_supported(esw))
		wanted |= MLX5_FDB_TO_VPORT_REG_C_1;

	if (enable)
		curr |= wanted;
	else
		curr &= ~wanted;

	MLX5_SET(modify_esw_vport_context_in, min,
		 esw_vport_context.fdb_to_vport_reg_c_id, curr);
	MLX5_SET(modify_esw_vport_context_in, min,
		 field_select.fdb_to_vport_reg_c_id, 1);

	err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, min);
	if (!err) {
		if (enable && (curr & MLX5_FDB_TO_VPORT_REG_C_1))
			esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
		else
			esw->flags &= ~MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
	}

	return err;
}
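
/* Net effect: with enable == true, fdb_to_vport_reg_c_id gets the REG_C_0
 * bit set (and REG_C_1 when loopback is supported), so values written to
 * those registers in the FDB are passed on to the vport context, where the
 * restore and vport rx rules below match on them.
 */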

static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
				  struct mlx5_core_dev *peer_dev,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_destination *dest)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

		MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(peer_dev, vhca_id));

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);
	}

	dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest->vport.num = peer_dev->priv.eswitch->manager_vport;
	dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
	dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}

static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
					       struct mlx5_eswitch *peer_esw,
					       struct mlx5_flow_spec *spec,
					       u16 vport)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(peer_esw,
								   vport));
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);
	}
}

static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
				       struct mlx5_core_dev *peer_dev)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle **flows;
	struct mlx5_flow_handle *flow;
	struct mlx5_flow_spec *spec;
	/* total vports is the same for both e-switches */
	int nvports = esw->total_vports;
	void *misc;
	int err, i;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	peer_miss_rules_setup(esw, peer_dev, spec, &dest);

	flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
	if (!flows) {
		err = -ENOMEM;
		goto alloc_flows_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
						   spec, MLX5_VPORT_PF);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_pf_flow_err;
		}
		flows[MLX5_VPORT_PF] = flow;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_ecpf_flow_err;
		}
		flows[mlx5_eswitch_ecpf_idx(esw)] = flow;
	}

	mlx5_esw_for_each_vf_vport_num(esw, i, mlx5_core_max_vfs(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw,
						   peer_dev->priv.eswitch,
						   spec, i);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_vf_flow_err;
		}
		flows[i] = flow;
	}

	esw->fdb_table.offloads.peer_miss_rules = flows;

	kvfree(spec);
	return 0;

add_vf_flow_err:
	nvports = --i;
	mlx5_esw_for_each_vf_vport_num_reverse(esw, i, nvports)
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
add_ecpf_flow_err:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);
add_pf_flow_err:
	esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
	kvfree(flows);
alloc_flows_err:
	kvfree(spec);
	return err;
}

static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows;
	int i;

	flows = esw->fdb_table.offloads.peer_miss_rules;

	mlx5_esw_for_each_vf_vport_num_reverse(esw, i,
					       mlx5_core_max_vfs(esw->dev))
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);

	kvfree(flows);
}

static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = esw->manager_vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}

struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *ft = esw->offloads.ft_offloads_restore;
	struct mlx5_flow_context *flow_context;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_destination dest;
	struct mlx5_flow_spec *spec;
	void *misc;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return ERR_PTR(-EOPNOTSUPP);

	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
		 ESW_CHAIN_TAG_METADATA_MASK);
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag);
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	flow_act.modify_hdr = esw->offloads.restore_copy_hdr_id;

	flow_context = &spec->flow_context;
	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = tag;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = esw->offloads.ft_offloads;

	flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	kfree(spec);

	if (IS_ERR(flow_rule))
		esw_warn(esw->dev,
			 "Failed to create restore rule for tag: %d, err(%d)\n",
			 tag, (int)PTR_ERR(flow_rule));

	return flow_rule;
}

u32
esw_get_max_restore_tag(struct mlx5_eswitch *esw)
{
	return ESW_CHAIN_TAG_METADATA_MASK;
}
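
/* Used by the fs_chains layer: chain ids are encoded as tags in
 * metadata_reg_c_0 up to this mask, and esw_add_restore_rule() installs one
 * rule per tag that sets the matching flow_tag, applies the restore copy
 * modify header and forwards to the offloads table, letting software recover
 * which chain a missed packet came from.
 */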

#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
					   u32 *flow_group_in)
{
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    flow_group_in,
					    match_criteria);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS_2);

		MLX5_SET(fte_match_param, match_criteria,
			 misc_parameters_2.metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());
	} else {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_port);
	}
}

#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
#define fdb_modify_header_fwd_to_table_supported(esw) \
	(MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table))
static void esw_init_chains_offload_flags(struct mlx5_eswitch *esw, u32 *flags)
{
	struct mlx5_core_dev *dev = esw->dev;

	if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ignore_flow_level))
		*flags |= MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;

	if (!MLX5_CAP_ESW_FLOWTABLE(dev, multi_fdb_encap) &&
	    esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n");
	} else if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Tc chains and priorities offload aren't supported\n");
	} else if (!fdb_modify_header_fwd_to_table_supported(esw)) {
		/* Disabled when ttl workaround is needed, e.g
		 * when ESWITCH_IPV4_TTL_MODIFY_ENABLE = true in mlxconfig
		 */
		esw_warn(dev,
			 "Tc chains and priorities offload aren't supported, check firmware version, or mlxconfig settings\n");
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
	} else {
		*flags |= MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_info(dev, "Supported tc chains and prios offload\n");
	}

	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		*flags |= MLX5_CHAINS_FT_TUNNEL_SUPPORTED;
}

static int
esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *nf_ft, *ft;
	struct mlx5_chains_attr attr = {};
	struct mlx5_fs_chains *chains;
	u32 fdb_max;
	int err;

	fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);

	esw_init_chains_offload_flags(esw, &attr.flags);
	attr.ns = MLX5_FLOW_NAMESPACE_FDB;
	attr.max_ft_sz = fdb_max;
	attr.max_grp_num = esw->params.large_group_num;
	attr.default_ft = miss_fdb;
	attr.max_restore_tag = esw_get_max_restore_tag(esw);

	chains = mlx5_chains_create(dev, &attr);
	if (IS_ERR(chains)) {
		err = PTR_ERR(chains);
		esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
		return err;
	}

	esw->fdb_table.offloads.esw_chains_priv = chains;

	/* Create tc_end_ft which is the always created ft chain */
	nf_ft = mlx5_chains_get_table(chains, mlx5_chains_get_nf_ft_chain(chains),
				      1, 0);
	if (IS_ERR(nf_ft)) {
		err = PTR_ERR(nf_ft);
		goto nf_ft_err;
	}

	/* Always open the root for fast path */
	ft = mlx5_chains_get_table(chains, 0, 1, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto level_0_err;
	}

	/* Open level 1 for split fdb rules now if prios isn't supported */
	if (!mlx5_chains_prios_supported(chains)) {
		err = mlx5_esw_vport_tbl_get(esw);
		if (err)
			goto level_1_err;
	}

	mlx5_chains_set_end_ft(chains, nf_ft);

	return 0;

level_1_err:
	mlx5_chains_put_table(chains, 0, 1, 0);
level_0_err:
	mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
nf_ft_err:
	mlx5_chains_destroy(chains);
	esw->fdb_table.offloads.esw_chains_priv = NULL;

	return err;
}

static void
esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
{
	if (!mlx5_chains_prios_supported(chains))
		mlx5_esw_vport_tbl_put(esw);
	mlx5_chains_put_table(chains, 0, 1, 0);
	mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
	mlx5_chains_destroy(chains);
}

#else /* CONFIG_MLX5_CLS_ACT */

static int
esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
{ return 0; }

static void
esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
{}

#endif

static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	u32 flags = 0, *flow_group_in;
	int table_size, ix, err = 0;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u8 *dmac;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}
	esw->fdb_table.offloads.ns = root_ns;
	err = mlx5_flow_namespace_set_mode(root_ns,
					   esw->dev->priv.steering->mode);
	if (err) {
		esw_warn(dev, "Failed to set FDB namespace steering mode\n");
		goto ns_err;
	}

	table_size = esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ +
		     MLX5_ESW_MISS_FLOWS + esw->total_vports;
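	/* The sizing above accounts for one send-to-vport entry per SQ (up to
	 * MAX_SQ_NVPORTS per vport plus MAX_PF_SQ for the PF), one peer miss
	 * entry per vport, and the two unicast/multicast miss flows; the
	 * group offsets (ix) below walk the table in the same order.
	 */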
b3ba5149 1487
e52c2802
PB
1488 /* create the slow path fdb with encap set, so further table instances
1489 * can be created at run time while VFs are probed if the FW allows that.
1490 */
1491 if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
1492 flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
1493 MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
1494
1495 ft_attr.flags = flags;
b3ba5149
ES
1496 ft_attr.max_fte = table_size;
1497 ft_attr.prio = FDB_SLOW_PATH;
1498
1499 fdb = mlx5_create_flow_table(root_ns, &ft_attr);
1033665e
OG
1500 if (IS_ERR(fdb)) {
1501 err = PTR_ERR(fdb);
1502 esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
1503 goto slow_fdb_err;
1504 }
52fff327 1505 esw->fdb_table.offloads.slow_fdb = fdb;
1033665e 1506
ae430332 1507 err = esw_chains_create(esw, fdb);
39ac237c 1508 if (err) {
ae430332 1509 esw_warn(dev, "Failed to open fdb chains err(%d)\n", err);
39ac237c 1510 goto fdb_chains_err;
e52c2802
PB
1511 }
1512
69697b6e 1513 /* create send-to-vport group */
69697b6e
OG
1514 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
1515 MLX5_MATCH_MISC_PARAMETERS);
1516
1517 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
1518
1519 MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
1520 MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
1521
0da3c12d 1522 ix = esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ;
69697b6e
OG
1523 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
1524 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);
1525
1526 g = mlx5_create_flow_group(fdb, flow_group_in);
1527 if (IS_ERR(g)) {
1528 err = PTR_ERR(g);
1529 esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
1530 goto send_vport_err;
1531 }
1532 esw->fdb_table.offloads.send_to_vport_grp = g;
1533
6cec0229
MD
1534 if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
1535 /* create peer esw miss group */
1536 memset(flow_group_in, 0, inlen);
ac004b83 1537
6cec0229 1538 esw_set_flow_group_source_port(esw, flow_group_in);
a5641cb5 1539
6cec0229
MD
1540 if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
1541 match_criteria = MLX5_ADDR_OF(create_flow_group_in,
1542 flow_group_in,
1543 match_criteria);
ac004b83 1544
6cec0229
MD
1545 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
1546 misc_parameters.source_eswitch_owner_vhca_id);
a5641cb5 1547
6cec0229
MD
1548 MLX5_SET(create_flow_group_in, flow_group_in,
1549 source_eswitch_owner_vhca_id_valid, 1);
1550 }
ac004b83 1551
6cec0229
MD
1552 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
1553 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
1554 ix + esw->total_vports - 1);
1555 ix += esw->total_vports;
ac004b83 1556
6cec0229
MD
1557 g = mlx5_create_flow_group(fdb, flow_group_in);
1558 if (IS_ERR(g)) {
1559 err = PTR_ERR(g);
1560 esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
1561 goto peer_miss_err;
1562 }
1563 esw->fdb_table.offloads.peer_miss_grp = g;
ac004b83 1564 }
ac004b83 1565
69697b6e
OG
1566 /* create miss group */
1567 memset(flow_group_in, 0, inlen);
f80be543
MB
1568 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
1569 MLX5_MATCH_OUTER_HEADERS);
1570 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
1571 match_criteria);
1572 dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
1573 outer_headers.dmac_47_16);
1574 dmac[0] = 0x01;
69697b6e
OG
1575
1576 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
cd7e4186
BW
1577 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
1578 ix + MLX5_ESW_MISS_FLOWS);
69697b6e
OG
1579
1580 g = mlx5_create_flow_group(fdb, flow_group_in);
1581 if (IS_ERR(g)) {
1582 err = PTR_ERR(g);
1583 esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
1584 goto miss_err;
1585 }
1586 esw->fdb_table.offloads.miss_grp = g;
1587
3aa33572
OG
1588 err = esw_add_fdb_miss_rule(esw);
1589 if (err)
1590 goto miss_rule_err;
1591
c88a026e 1592 kvfree(flow_group_in);
69697b6e
OG
1593 return 0;
1594
3aa33572
OG
1595miss_rule_err:
1596 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
69697b6e 1597miss_err:
6cec0229
MD
1598 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
1599 mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
ac004b83 1600peer_miss_err:
69697b6e
OG
1601 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
1602send_vport_err:
ae430332 1603 esw_chains_destroy(esw, esw_chains(esw));
39ac237c 1604fdb_chains_err:
52fff327 1605 mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
1033665e 1606slow_fdb_err:
8463daf1
MG
1607 /* Holds true only as long as DMFS is the default */
1608 mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS);
69697b6e
OG
1609ns_err:
1610 kvfree(flow_group_in);
1611 return err;
1612}
1613
1967ce6e 1614static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
69697b6e 1615{
e52c2802 1616 if (!esw->fdb_table.offloads.slow_fdb)
69697b6e
OG
1617 return;
1618
1967ce6e 1619 esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
f80be543
MB
1620 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
1621 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
69697b6e 1622 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
6cec0229
MD
1623 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
1624 mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
69697b6e
OG
1625 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
1626
ae430332
AL
1627 esw_chains_destroy(esw, esw_chains(esw));
1628
52fff327 1629 mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
8463daf1
MG
1630 /* Holds true only as long as DMFS is the default */
1631 mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
1632 MLX5_FLOW_STEERING_MODE_DMFS);
69697b6e 1633}
c116c6ee 1634
8d6bd3c3 1635static int esw_create_offloads_table(struct mlx5_eswitch *esw)
c116c6ee 1636{
b3ba5149 1637 struct mlx5_flow_table_attr ft_attr = {};
c116c6ee 1638 struct mlx5_core_dev *dev = esw->dev;
b3ba5149
ES
1639 struct mlx5_flow_table *ft_offloads;
1640 struct mlx5_flow_namespace *ns;
c116c6ee
OG
1641 int err = 0;
1642
1643 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
1644 if (!ns) {
1645 esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
eff596da 1646 return -EOPNOTSUPP;
c116c6ee
OG
1647 }
1648
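/* One rx rule per vport, plus room for the miss flows. */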
8d6bd3c3 1649 ft_attr.max_fte = esw->total_vports + MLX5_ESW_MISS_FLOWS;
11b717d6 1650 ft_attr.prio = 1;
b3ba5149
ES
1651
1652 ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
c116c6ee
OG
1653 if (IS_ERR(ft_offloads)) {
1654 err = PTR_ERR(ft_offloads);
1655 esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
1656 return err;
1657 }
1658
1659 esw->offloads.ft_offloads = ft_offloads;
1660 return 0;
1661}
1662
1663static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
1664{
1665 struct mlx5_esw_offload *offloads = &esw->offloads;
1666
1667 mlx5_destroy_flow_table(offloads->ft_offloads);
1668}
fed9ce22 1669
8d6bd3c3 1670static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
fed9ce22
OG
1671{
1672 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1673 struct mlx5_flow_group *g;
fed9ce22 1674 u32 *flow_group_in;
8d6bd3c3 1675 int nvports;
fed9ce22 1676 int err = 0;
fed9ce22 1677
8d6bd3c3 1678 nvports = esw->total_vports + MLX5_ESW_MISS_FLOWS;
1b9a07ee 1679 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
fed9ce22
OG
1680 if (!flow_group_in)
1681 return -ENOMEM;
1682
1683 /* create vport rx group */
a5641cb5 1684 esw_set_flow_group_source_port(esw, flow_group_in);
fed9ce22
OG
1685
1686 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
1687 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);
1688
1689 g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
1690
1691 if (IS_ERR(g)) {
1692 err = PTR_ERR(g);
1693 mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
1694 goto out;
1695 }
1696
1697 esw->offloads.vport_rx_group = g;
1698out:
e574978a 1699 kvfree(flow_group_in);
fed9ce22
OG
1700 return err;
1701}
1702
1703static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
1704{
1705 mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
1706}
1707
74491de9 1708struct mlx5_flow_handle *
02f3afd9 1709mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
c966f7d5 1710 struct mlx5_flow_destination *dest)
fed9ce22 1711{
66958ed9 1712 struct mlx5_flow_act flow_act = {0};
74491de9 1713 struct mlx5_flow_handle *flow_rule;
c5bb1730 1714 struct mlx5_flow_spec *spec;
fed9ce22
OG
1715 void *misc;
1716
1b9a07ee 1717 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
c5bb1730 1718 if (!spec) {
fed9ce22
OG
1719 flow_rule = ERR_PTR(-ENOMEM);
1720 goto out;
1721 }
1722
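/* Match the source vport either on the reg_c_0 metadata or on the
 * source_port misc field, depending on whether vport match metadata
 * is enabled.
 */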
a5641cb5
JL
1723 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
1724 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
1725 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
1726 mlx5_eswitch_get_vport_metadata_for_match(esw, vport));
fed9ce22 1727
a5641cb5 1728 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
0f0d3827
PB
1729 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
1730 mlx5_eswitch_get_vport_metadata_mask());
fed9ce22 1731
a5641cb5
JL
1732 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
1733 } else {
1734 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
1735 MLX5_SET(fte_match_set_misc, misc, source_port, vport);
1736
1737 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
1738 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
1739
1740 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
1741 }
fed9ce22 1742
66958ed9 1743 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
74491de9 1744 flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
c966f7d5 1745 &flow_act, dest, 1);
fed9ce22
OG
1746 if (IS_ERR(flow_rule)) {
1747 esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
1748 goto out;
1749 }
1750
1751out:
c5bb1730 1752 kvfree(spec);
fed9ce22
OG
1753 return flow_rule;
1754}
feae9087 1755
bf3347c4 1756
cc617ced
PP
1757static int mlx5_eswitch_inline_mode_get(const struct mlx5_eswitch *esw, u8 *mode)
1758{
1759 u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
1760 struct mlx5_core_dev *dev = esw->dev;
1761 int vport;
1762
1763 if (!MLX5_CAP_GEN(dev, vport_group_manager))
1764 return -EOPNOTSUPP;
1765
1766 if (esw->mode == MLX5_ESWITCH_NONE)
1767 return -EOPNOTSUPP;
1768
1769 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
1770 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
1771 mlx5_mode = MLX5_INLINE_MODE_NONE;
1772 goto out;
1773 case MLX5_CAP_INLINE_MODE_L2:
1774 mlx5_mode = MLX5_INLINE_MODE_L2;
1775 goto out;
1776 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
1777 goto query_vports;
1778 }
1779
1780query_vports:
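/* All host function vports must report the same min inline mode,
 * otherwise there is no single mode to return.
 */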
1781 mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
1782 mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
1783 mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
1784 if (prev_mlx5_mode != mlx5_mode)
1785 return -EINVAL;
1786 prev_mlx5_mode = mlx5_mode;
1787 }
1788
1789out:
1790 *mode = mlx5_mode;
1791 return 0;
e08a6832 1792}
bf3347c4 1793
11b717d6
PB
1794static void esw_destroy_restore_table(struct mlx5_eswitch *esw)
1795{
1796 struct mlx5_esw_offload *offloads = &esw->offloads;
1797
60acc105
PB
1798 if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
1799 return;
1800
6724e66b 1801 mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id);
11b717d6
PB
1802 mlx5_destroy_flow_group(offloads->restore_group);
1803 mlx5_destroy_flow_table(offloads->ft_offloads_restore);
1804}
1805
1806static int esw_create_restore_table(struct mlx5_eswitch *esw)
1807{
d65dbedf 1808 u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
11b717d6
PB
1809 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1810 struct mlx5_flow_table_attr ft_attr = {};
1811 struct mlx5_core_dev *dev = esw->dev;
1812 struct mlx5_flow_namespace *ns;
6724e66b 1813 struct mlx5_modify_hdr *mod_hdr;
11b717d6
PB
1814 void *match_criteria, *misc;
1815 struct mlx5_flow_table *ft;
1816 struct mlx5_flow_group *g;
1817 u32 *flow_group_in;
1818 int err = 0;
1819
60acc105
PB
1820 if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
1821 return 0;
1822
11b717d6
PB
1823 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
1824 if (!ns) {
1825 esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
1826 return -EOPNOTSUPP;
1827 }
1828
1829 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
1830 if (!flow_group_in) {
1831 err = -ENOMEM;
1832 goto out_free;
1833 }
1834
1835 ft_attr.max_fte = 1 << ESW_CHAIN_TAG_METADATA_BITS;
1836 ft = mlx5_create_flow_table(ns, &ft_attr);
1837 if (IS_ERR(ft)) {
1838 err = PTR_ERR(ft);
1839 esw_warn(esw->dev, "Failed to create restore table, err %d\n",
1840 err);
1841 goto out_free;
1842 }
1843
11b717d6
PB
1844 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
1845 match_criteria);
1846 misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
1847 misc_parameters_2);
1848
1849 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
1850 ESW_CHAIN_TAG_METADATA_MASK);
1851 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
1852 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
1853 ft_attr.max_fte - 1);
1854 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
1855 MLX5_MATCH_MISC_PARAMETERS_2);
1856 g = mlx5_create_flow_group(ft, flow_group_in);
1857 if (IS_ERR(g)) {
1858 err = PTR_ERR(g);
1859 esw_warn(dev, "Failed to create restore flow group, err: %d\n",
1860 err);
1861 goto err_group;
1862 }
1863
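/* Build a modify header that copies the chain tag from reg_c_1 into
 * reg_b, where it can be read back on the receive path to restore
 * the chain.
 */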
6724e66b
PB
1864 MLX5_SET(copy_action_in, modact, action_type, MLX5_ACTION_TYPE_COPY);
1865 MLX5_SET(copy_action_in, modact, src_field,
1866 MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
1867 MLX5_SET(copy_action_in, modact, dst_field,
1868 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
1869 mod_hdr = mlx5_modify_header_alloc(esw->dev,
1870 MLX5_FLOW_NAMESPACE_KERNEL, 1,
1871 modact);
1872 if (IS_ERR(mod_hdr)) {
e9864539 1873 err = PTR_ERR(mod_hdr);
6724e66b
PB
1874 esw_warn(dev, "Failed to create restore mod header, err: %d\n",
1875 err);
6724e66b
PB
1876 goto err_mod_hdr;
1877 }
1878
11b717d6
PB
1879 esw->offloads.ft_offloads_restore = ft;
1880 esw->offloads.restore_group = g;
6724e66b 1881 esw->offloads.restore_copy_hdr_id = mod_hdr;
11b717d6 1882
c8508713
RD
1883 kvfree(flow_group_in);
1884
11b717d6
PB
1885 return 0;
1886
6724e66b
PB
1887err_mod_hdr:
1888 mlx5_destroy_flow_group(g);
11b717d6
PB
1889err_group:
1890 mlx5_destroy_flow_table(ft);
1891out_free:
1892 kvfree(flow_group_in);
1893
1894 return err;
cc617ced
PP
1895}
1896
db7ff19e
EB
1897static int esw_offloads_start(struct mlx5_eswitch *esw,
1898 struct netlink_ext_ack *extack)
c930a3ad 1899{
062f4bf4 1900 int err, err1;
c930a3ad 1901
8e0aa4bc
PP
1902 mlx5_eswitch_disable_locked(esw, false);
1903 err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
1904 esw->dev->priv.sriov.num_vfs);
6c419ba8 1905 if (err) {
8c98ee77
EB
1906 NL_SET_ERR_MSG_MOD(extack,
1907 "Failed setting eswitch to offloads");
8e0aa4bc
PP
1908 err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
1909 MLX5_ESWITCH_IGNORE_NUM_VFS);
8c98ee77
EB
1910 if (err1) {
1911 NL_SET_ERR_MSG_MOD(extack,
1912 "Failed setting eswitch back to legacy");
1913 }
6c419ba8 1914 }
bffaa916
RD
1915 if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
1916 if (mlx5_eswitch_inline_mode_get(esw,
bffaa916
RD
1917 &esw->offloads.inline_mode)) {
1918 esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
8c98ee77
EB
1919 NL_SET_ERR_MSG_MOD(extack,
1920 "Inline mode is different between vports");
bffaa916
RD
1921 }
1922 }
c930a3ad
OG
1923 return err;
1924}
1925
e8d31c4d
MB
1926void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
1927{
1928 kfree(esw->offloads.vport_reps);
1929}
1930
1931int esw_offloads_init_reps(struct mlx5_eswitch *esw)
1932{
2752b823 1933 int total_vports = esw->total_vports;
e8d31c4d 1934 struct mlx5_eswitch_rep *rep;
d6518db2 1935 int vport_index;
ef2e4094 1936 u8 rep_type;
e8d31c4d 1937
2aca1787 1938 esw->offloads.vport_reps = kcalloc(total_vports,
e8d31c4d
MB
1939 sizeof(struct mlx5_eswitch_rep),
1940 GFP_KERNEL);
1941 if (!esw->offloads.vport_reps)
1942 return -ENOMEM;
1943
d6518db2
BW
1944 mlx5_esw_for_all_reps(esw, vport_index, rep) {
1945 rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport_index);
2f69e591 1946 rep->vport_index = vport_index;
f121e0ea
BW
1947
1948 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
8693115a 1949 atomic_set(&rep->rep_data[rep_type].state,
6f4e0219 1950 REP_UNREGISTERED);
e8d31c4d
MB
1951 }
1952
e8d31c4d
MB
1953 return 0;
1954}
1955
c9b99abc
BW
1956static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
1957 struct mlx5_eswitch_rep *rep, u8 rep_type)
1958{
8693115a 1959 if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
6f4e0219 1960 REP_LOADED, REP_REGISTERED) == REP_LOADED)
8693115a 1961 esw->offloads.rep_ops[rep_type]->unload(rep);
c9b99abc
BW
1962}
1963
d7f33a45
VP
1964static void __unload_reps_sf_vport(struct mlx5_eswitch *esw, u8 rep_type)
1965{
1966 struct mlx5_eswitch_rep *rep;
1967 int i;
1968
1969 mlx5_esw_for_each_sf_rep(esw, i, rep)
1970 __esw_offloads_unload_rep(esw, rep, rep_type);
1971}
1972
4110fc59 1973static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
6ed1803a
MB
1974{
1975 struct mlx5_eswitch_rep *rep;
4110fc59
BW
1976 int i;
1977
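/* Unload in reverse of the load order: SF reps first, then VF reps,
 * the ECPF and host PF reps, and the uplink rep last.
 */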
d7f33a45
VP
1978 __unload_reps_sf_vport(esw, rep_type);
1979
4110fc59
BW
1980 mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, esw->esw_funcs.num_vfs)
1981 __esw_offloads_unload_rep(esw, rep, rep_type);
c9b99abc 1982
81cd229c
BW
1983 if (mlx5_ecpf_vport_exists(esw->dev)) {
1984 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
1985 __esw_offloads_unload_rep(esw, rep, rep_type);
1986 }
1987
1988 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
1989 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
1990 __esw_offloads_unload_rep(esw, rep, rep_type);
1991 }
1992
879c8f84 1993 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
c9b99abc 1994 __esw_offloads_unload_rep(esw, rep, rep_type);
6ed1803a
MB
1995}
1996
d970812b 1997int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
a4b97ab4 1998{
c2d7712c
BW
1999 struct mlx5_eswitch_rep *rep;
2000 int rep_type;
a4b97ab4
MB
2001 int err;
2002
c2d7712c
BW
2003 rep = mlx5_eswitch_get_rep(esw, vport_num);
2004 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
2005 if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
2006 REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
2007 err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
2008 if (err)
2009 goto err_reps;
2010 }
2011
2012 return 0;
a4b97ab4
MB
2013
2014err_reps:
c2d7712c
BW
2015 atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED);
2016 for (--rep_type; rep_type >= 0; rep_type--)
2017 __esw_offloads_unload_rep(esw, rep, rep_type);
6ed1803a
MB
2018 return err;
2019}
2020
d970812b 2021void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
c2d7712c
BW
2022{
2023 struct mlx5_eswitch_rep *rep;
2024 int rep_type;
2025
c2d7712c
BW
2026 rep = mlx5_eswitch_get_rep(esw, vport_num);
2027 for (rep_type = NUM_REP_TYPES - 1; rep_type >= 0; rep_type--)
2028 __esw_offloads_unload_rep(esw, rep, rep_type);
2029}
2030
38679b5a
PP
2031int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
2032{
2033 int err;
2034
2035 if (esw->mode != MLX5_ESWITCH_OFFLOADS)
2036 return 0;
2037
c7eddc60
PP
2038 err = mlx5_esw_offloads_devlink_port_register(esw, vport_num);
2039 if (err)
2040 return err;
2041
38679b5a 2042 err = mlx5_esw_offloads_rep_load(esw, vport_num);
c7eddc60
PP
2043 if (err)
2044 goto load_err;
2045 return err;
2046
2047load_err:
2048 mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
38679b5a
PP
2049 return err;
2050}
2051
2052void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
2053{
2054 if (esw->mode != MLX5_ESWITCH_OFFLOADS)
2055 return;
2056
2057 mlx5_esw_offloads_rep_unload(esw, vport_num);
c7eddc60 2058 mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
38679b5a
PP
2059}
2060
ac004b83
RD
2061#define ESW_OFFLOADS_DEVCOM_PAIR (0)
2062#define ESW_OFFLOADS_DEVCOM_UNPAIR (1)
2063
2064static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
2065 struct mlx5_eswitch *peer_esw)
2066{
2067 int err;
2068
2069 err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
2070 if (err)
2071 return err;
2072
2073 return 0;
2074}
2075
2076static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
2077{
d956873f 2078#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
04de7dda 2079 mlx5e_tc_clean_fdb_peer_flows(esw);
d956873f 2080#endif
ac004b83
RD
2081 esw_del_fdb_peer_miss_rules(esw);
2082}
2083
8463daf1
MG
2084static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
2085 struct mlx5_eswitch *peer_esw,
2086 bool pair)
2087{
2088 struct mlx5_flow_root_namespace *peer_ns;
2089 struct mlx5_flow_root_namespace *ns;
2090 int err;
2091
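/* On pair, point each eswitch's FDB root namespace at the other's;
 * on unpair, clear both.
 */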
2092 peer_ns = peer_esw->dev->priv.steering->fdb_root_ns;
2093 ns = esw->dev->priv.steering->fdb_root_ns;
2094
2095 if (pair) {
2096 err = mlx5_flow_namespace_set_peer(ns, peer_ns);
2097 if (err)
2098 return err;
2099
e53e6655 2100 err = mlx5_flow_namespace_set_peer(peer_ns, ns);
8463daf1
MG
2101 if (err) {
2102 mlx5_flow_namespace_set_peer(ns, NULL);
2103 return err;
2104 }
2105 } else {
2106 mlx5_flow_namespace_set_peer(ns, NULL);
2107 mlx5_flow_namespace_set_peer(peer_ns, NULL);
2108 }
2109
2110 return 0;
2111}
2112
ac004b83
RD
2113static int mlx5_esw_offloads_devcom_event(int event,
2114 void *my_data,
2115 void *event_data)
2116{
2117 struct mlx5_eswitch *esw = my_data;
ac004b83 2118 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
8463daf1 2119 struct mlx5_eswitch *peer_esw = event_data;
ac004b83
RD
2120 int err;
2121
2122 switch (event) {
2123 case ESW_OFFLOADS_DEVCOM_PAIR:
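/* Both eswitches must agree on the use of vport match metadata
 * before they can be paired.
 */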
a5641cb5
JL
2124 if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
2125 mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
2126 break;
2127
8463daf1 2128 err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
ac004b83
RD
2129 if (err)
2130 goto err_out;
8463daf1
MG
2131 err = mlx5_esw_offloads_pair(esw, peer_esw);
2132 if (err)
2133 goto err_peer;
ac004b83
RD
2134
2135 err = mlx5_esw_offloads_pair(peer_esw, esw);
2136 if (err)
2137 goto err_pair;
2138
2139 mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
2140 break;
2141
2142 case ESW_OFFLOADS_DEVCOM_UNPAIR:
2143 if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
2144 break;
2145
2146 mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
2147 mlx5_esw_offloads_unpair(peer_esw);
2148 mlx5_esw_offloads_unpair(esw);
8463daf1 2149 mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
ac004b83
RD
2150 break;
2151 }
2152
2153 return 0;
2154
2155err_pair:
2156 mlx5_esw_offloads_unpair(esw);
8463daf1
MG
2157err_peer:
2158 mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
ac004b83
RD
2159err_out:
2160 mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d\n",
2161 event, err);
2162 return err;
2163}
2164
2165static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
2166{
2167 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
2168
04de7dda
RD
2169 INIT_LIST_HEAD(&esw->offloads.peer_flows);
2170 mutex_init(&esw->offloads.peer_mutex);
2171
ac004b83
RD
2172 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
2173 return;
2174
2175 mlx5_devcom_register_component(devcom,
2176 MLX5_DEVCOM_ESW_OFFLOADS,
2177 mlx5_esw_offloads_devcom_event,
2178 esw);
2179
2180 mlx5_devcom_send_event(devcom,
2181 MLX5_DEVCOM_ESW_OFFLOADS,
2182 ESW_OFFLOADS_DEVCOM_PAIR, esw);
2183}
2184
2185static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
2186{
2187 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
2188
2189 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
2190 return;
2191
2192 mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
2193 ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
2194
2195 mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
2196}
2197
92ab1eb3
JL
2198static bool
2199esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
2200{
2201 if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
2202 return false;
2203
2204 if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
2205 MLX5_FDB_TO_VPORT_REG_C_0))
2206 return false;
2207
2208 if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
2209 return false;
2210
2211 if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
2212 mlx5_ecpf_vport_exists(esw->dev))
2213 return false;
2214
2215 return true;
2216}
2217
133dcfc5
VP
2218u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
2219{
7cd7becd 2220 u32 vport_end_ida = (1 << ESW_VPORT_BITS) - 1;
2221 u32 max_pf_num = (1 << ESW_PFNUM_BITS) - 1;
2222 u32 pf_num;
133dcfc5
VP
2223 int id;
2224
7cd7becd 2225 /* Only 4 bits of pf_num */
2226 pf_num = PCI_FUNC(esw->dev->pdev->devfn);
2227 if (pf_num > max_pf_num)
2228 return 0;
133dcfc5 2229
7cd7becd 2230 /* Metadata is 4 bits of PFNUM and 12 bits of unique id */
2231 /* Use only a non-zero vport_id (1-4095) for all PFs */
2232 id = ida_alloc_range(&esw->offloads.vport_metadata_ida, 1, vport_end_ida, GFP_KERNEL);
2233 if (id < 0)
2234 return 0;
2235 id = (pf_num << ESW_VPORT_BITS) | id;
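/* e.g. with the 12-bit id noted above, pf_num 1 and ida id 5 yield
 * (1 << 12) | 5 = 0x1005
 */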
2236 return id;
133dcfc5
VP
2237}
2238
2239void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata)
2240{
7cd7becd 2241 u32 vport_bit_mask = (1 << ESW_VPORT_BITS) - 1;
2242
2243 /* Metadata contains only the low 12 bits of the IDA id */
2244 ida_free(&esw->offloads.vport_metadata_ida, metadata & vport_bit_mask);
133dcfc5
VP
2245}
2246
2247static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw,
2248 struct mlx5_vport *vport)
2249{
133dcfc5
VP
2250 vport->default_metadata = mlx5_esw_match_metadata_alloc(esw);
2251 vport->metadata = vport->default_metadata;
2252 return vport->metadata ? 0 : -ENOSPC;
2253}
2254
2255static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw,
2256 struct mlx5_vport *vport)
2257{
406493a5 2258 if (!vport->default_metadata)
133dcfc5
VP
2259 return;
2260
2261 WARN_ON(vport->metadata != vport->default_metadata);
2262 mlx5_esw_match_metadata_free(esw, vport->default_metadata);
2263}
2264
fc99c3d6
VP
2265static void esw_offloads_metadata_uninit(struct mlx5_eswitch *esw)
2266{
2267 struct mlx5_vport *vport;
2268 int i;
2269
2270 if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
2271 return;
2272
2273 mlx5_esw_for_all_vports_reverse(esw, i, vport)
2274 esw_offloads_vport_metadata_cleanup(esw, vport);
2275}
2276
2277static int esw_offloads_metadata_init(struct mlx5_eswitch *esw)
2278{
2279 struct mlx5_vport *vport;
2280 int err;
2281 int i;
2282
2283 if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
2284 return 0;
2285
2286 mlx5_esw_for_all_vports(esw, i, vport) {
2287 err = esw_offloads_vport_metadata_setup(esw, vport);
2288 if (err)
2289 goto metadata_err;
2290 }
2291
2292 return 0;
2293
2294metadata_err:
2295 esw_offloads_metadata_uninit(esw);
2296 return err;
2297}
2298
748da30b 2299int
89a0f1fb
PP
2300esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
2301 struct mlx5_vport *vport)
7445cfb1 2302{
7445cfb1
JL
2303 int err;
2304
07bab950 2305 err = esw_acl_ingress_ofld_setup(esw, vport);
89a0f1fb 2306 if (err)
fc99c3d6 2307 return err;
7445cfb1 2308
2c40db2f
PP
2309 err = esw_acl_egress_ofld_setup(esw, vport);
2310 if (err)
2311 goto egress_err;
07bab950
VP
2312
2313 return 0;
2314
2315egress_err:
2316 esw_acl_ingress_ofld_cleanup(esw, vport);
89a0f1fb
PP
2317 return err;
2318}
18486737 2319
748da30b 2320void
89a0f1fb
PP
2321esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
2322 struct mlx5_vport *vport)
2323{
ea651a86 2324 esw_acl_egress_ofld_cleanup(vport);
07bab950 2325 esw_acl_ingress_ofld_cleanup(esw, vport);
89a0f1fb 2326}
7445cfb1 2327
748da30b 2328static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
7445cfb1
JL
2329{
2330 struct mlx5_vport *vport;
18486737 2331
748da30b 2332 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
4e9a9ef7 2333 return esw_vport_create_offloads_acl_tables(esw, vport);
18486737
EB
2334}
2335
748da30b 2336static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
18486737 2337{
786ef904 2338 struct mlx5_vport *vport;
7445cfb1 2339
748da30b
VP
2340 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
2341 esw_vport_destroy_offloads_acl_tables(esw, vport);
18486737
EB
2342}
2343
062f4bf4 2344static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
6ed1803a 2345{
34ca6535 2346 struct mlx5_esw_indir_table *indir;
6ed1803a
MB
2347 int err;
2348
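/* Build the steering objects in dependency order: indirection
 * tables, uplink ACLs, offloads table, restore table, FDB tables
 * and finally the vport rx group.
 */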
5c1d260e 2349 memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
f8d1edda
PP
2350 mutex_init(&esw->fdb_table.offloads.vports.lock);
2351 hash_init(esw->fdb_table.offloads.vports.table);
e52c2802 2352
34ca6535
VB
2353 indir = mlx5_esw_indir_table_init();
2354 if (IS_ERR(indir)) {
2355 err = PTR_ERR(indir);
2356 goto create_indir_err;
2357 }
2358 esw->fdb_table.offloads.indir = indir;
2359
748da30b 2360 err = esw_create_uplink_offloads_acl_tables(esw);
7445cfb1 2361 if (err)
f8d1edda 2362 goto create_acl_err;
18486737 2363
8d6bd3c3 2364 err = esw_create_offloads_table(esw);
c930a3ad 2365 if (err)
11b717d6 2366 goto create_offloads_err;
c930a3ad 2367
11b717d6 2368 err = esw_create_restore_table(esw);
c930a3ad 2369 if (err)
11b717d6
PB
2370 goto create_restore_err;
2371
0da3c12d 2372 err = esw_create_offloads_fdb_tables(esw);
11b717d6
PB
2373 if (err)
2374 goto create_fdb_err;
c930a3ad 2375
8d6bd3c3 2376 err = esw_create_vport_rx_group(esw);
c930a3ad
OG
2377 if (err)
2378 goto create_fg_err;
2379
2380 return 0;
2381
2382create_fg_err:
1967ce6e 2383 esw_destroy_offloads_fdb_tables(esw);
7445cfb1 2384create_fdb_err:
11b717d6
PB
2385 esw_destroy_restore_table(esw);
2386create_restore_err:
2387 esw_destroy_offloads_table(esw);
2388create_offloads_err:
748da30b 2389 esw_destroy_uplink_offloads_acl_tables(esw);
f8d1edda 2390create_acl_err:
34ca6535
VB
2391 mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
2392create_indir_err:
f8d1edda 2393 mutex_destroy(&esw->fdb_table.offloads.vports.lock);
c930a3ad
OG
2394 return err;
2395}
2396
eca8cc38
BW
2397static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
2398{
2399 esw_destroy_vport_rx_group(esw);
eca8cc38 2400 esw_destroy_offloads_fdb_tables(esw);
11b717d6
PB
2401 esw_destroy_restore_table(esw);
2402 esw_destroy_offloads_table(esw);
748da30b 2403 esw_destroy_uplink_offloads_acl_tables(esw);
34ca6535 2404 mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
f8d1edda 2405 mutex_destroy(&esw->fdb_table.offloads.vports.lock);
eca8cc38
BW
2406}
2407
7e736f9a
PP
2408static void
2409esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
a3888f33 2410{
5ccf2770 2411 bool host_pf_disabled;
7e736f9a 2412 u16 new_num_vfs;
a3888f33 2413
7e736f9a
PP
2414 new_num_vfs = MLX5_GET(query_esw_functions_out, out,
2415 host_params_context.host_num_of_vfs);
5ccf2770
BW
2416 host_pf_disabled = MLX5_GET(query_esw_functions_out, out,
2417 host_params_context.host_pf_disabled);
a3888f33 2418
7e736f9a
PP
2419 if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
2420 return;
a3888f33
BW
2421
2422 /* Number of VFs can only change from "0 to x" or "x to 0". */
cd56f929 2423 if (esw->esw_funcs.num_vfs > 0) {
23bb50cf 2424 mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
a3888f33 2425 } else {
7e736f9a 2426 int err;
a3888f33 2427
23bb50cf
BW
2428 err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs,
2429 MLX5_VPORT_UC_ADDR_CHANGE);
a3888f33 2430 if (err)
7e736f9a 2431 return;
a3888f33 2432 }
7e736f9a 2433 esw->esw_funcs.num_vfs = new_num_vfs;
a3888f33
BW
2434}
2435
7e736f9a 2436static void esw_functions_changed_event_handler(struct work_struct *work)
ac35dcd6 2437{
7e736f9a
PP
2438 struct mlx5_host_work *host_work;
2439 struct mlx5_eswitch *esw;
dd28087c 2440 const u32 *out;
ac35dcd6 2441
7e736f9a
PP
2442 host_work = container_of(work, struct mlx5_host_work, work);
2443 esw = host_work->esw;
a3888f33 2444
dd28087c
PP
2445 out = mlx5_esw_query_functions(esw->dev);
2446 if (IS_ERR(out))
7e736f9a 2447 goto out;
a3888f33 2448
7e736f9a 2449 esw_vfs_changed_event_handler(esw, out);
dd28087c 2450 kvfree(out);
a3888f33 2451out:
ac35dcd6
VP
2452 kfree(host_work);
2453}
2454
16fff98a 2455int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data)
a3888f33 2456{
cd56f929 2457 struct mlx5_esw_functions *esw_funcs;
a3888f33 2458 struct mlx5_host_work *host_work;
a3888f33
BW
2459 struct mlx5_eswitch *esw;
2460
2461 host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
2462 if (!host_work)
2463 return NOTIFY_DONE;
2464
cd56f929
VP
2465 esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
2466 esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);
a3888f33
BW
2467
2468 host_work->esw = esw;
2469
062f4bf4 2470 INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
a3888f33
BW
2471 queue_work(esw->work_queue, &host_work->work);
2472
2473 return NOTIFY_OK;
2474}
2475
a53cf949
PP
2476static int mlx5_esw_host_number_init(struct mlx5_eswitch *esw)
2477{
2478 const u32 *query_host_out;
2479
2480 if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
2481 return 0;
2482
2483 query_host_out = mlx5_esw_query_functions(esw->dev);
2484 if (IS_ERR(query_host_out))
2485 return PTR_ERR(query_host_out);
2486
2487 /* Mark a non-local controller with a non-zero controller number. */
2488 esw->offloads.host_number = MLX5_GET(query_esw_functions_out, query_host_out,
2489 host_params_context.host_number);
2490 kvfree(query_host_out);
2491 return 0;
2492}
2493
5896b972 2494int esw_offloads_enable(struct mlx5_eswitch *esw)
eca8cc38 2495{
3b83b6c2
DL
2496 struct mlx5_vport *vport;
2497 int err, i;
eca8cc38 2498
9a64144d
MG
2499 if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
2500 MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
2501 esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
2502 else
2503 esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
2504
2bb72e7e 2505 mutex_init(&esw->offloads.termtbl_mutex);
8463daf1 2506 mlx5_rdma_enable_roce(esw->dev);
eca8cc38 2507
a53cf949
PP
2508 err = mlx5_esw_host_number_init(esw);
2509 if (err)
cd1ef966 2510 goto err_metadata;
a53cf949 2511
cd1ef966 2512 if (esw_check_vport_match_metadata_supported(esw))
4e9a9ef7
VP
2513 esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
2514
fc99c3d6
VP
2515 err = esw_offloads_metadata_init(esw);
2516 if (err)
2517 goto err_metadata;
2518
332bd3a5
PP
2519 err = esw_set_passing_vport_metadata(esw, true);
2520 if (err)
2521 goto err_vport_metadata;
c1286050 2522
7983a675
PB
2523 err = esw_offloads_steering_init(esw);
2524 if (err)
2525 goto err_steering_init;
2526
3b83b6c2
DL
2527 /* The representor will control the vport link state */
2528 mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
2529 vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
2530
c2d7712c
BW
2531 /* Uplink vport rep must load first. */
2532 err = esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK);
925a6acc 2533 if (err)
c2d7712c 2534 goto err_uplink;
c1286050 2535
c2d7712c 2536 err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
eca8cc38 2537 if (err)
c2d7712c 2538 goto err_vports;
eca8cc38
BW
2539
2540 esw_offloads_devcom_init(esw);
a3888f33 2541
eca8cc38
BW
2542 return 0;
2543
925a6acc 2544err_vports:
c2d7712c
BW
2545 esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
2546err_uplink:
7983a675 2547 esw_offloads_steering_cleanup(esw);
79949985
PP
2548err_steering_init:
2549 esw_set_passing_vport_metadata(esw, false);
7983a675 2550err_vport_metadata:
fc99c3d6
VP
2551 esw_offloads_metadata_uninit(esw);
2552err_metadata:
4e9a9ef7 2553 esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
8463daf1 2554 mlx5_rdma_disable_roce(esw->dev);
2bb72e7e 2555 mutex_destroy(&esw->offloads.termtbl_mutex);
eca8cc38
BW
2556 return err;
2557}
2558
db7ff19e
EB
2559static int esw_offloads_stop(struct mlx5_eswitch *esw,
2560 struct netlink_ext_ack *extack)
c930a3ad 2561{
062f4bf4 2562 int err, err1;
c930a3ad 2563
8e0aa4bc
PP
2564 mlx5_eswitch_disable_locked(esw, false);
2565 err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
2566 MLX5_ESWITCH_IGNORE_NUM_VFS);
6c419ba8 2567 if (err) {
8c98ee77 2568 NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
8e0aa4bc
PP
2569 err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
2570 MLX5_ESWITCH_IGNORE_NUM_VFS);
8c98ee77
EB
2571 if (err1) {
2572 NL_SET_ERR_MSG_MOD(extack,
2573 "Failed setting eswitch back to offloads");
2574 }
6c419ba8 2575 }
c930a3ad
OG
2576
2577 return err;
2578}
2579
5896b972 2580void esw_offloads_disable(struct mlx5_eswitch *esw)
c930a3ad 2581{
ac004b83 2582 esw_offloads_devcom_cleanup(esw);
5896b972 2583 mlx5_eswitch_disable_pf_vf_vports(esw);
c2d7712c 2584 esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
332bd3a5 2585 esw_set_passing_vport_metadata(esw, false);
eca8cc38 2586 esw_offloads_steering_cleanup(esw);
fc99c3d6 2587 esw_offloads_metadata_uninit(esw);
4e9a9ef7 2588 esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
8463daf1 2589 mlx5_rdma_disable_roce(esw->dev);
2bb72e7e 2590 mutex_destroy(&esw->offloads.termtbl_mutex);
9a64144d 2591 esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
c930a3ad
OG
2592}
2593
ef78618b 2594static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
c930a3ad
OG
2595{
2596 switch (mode) {
2597 case DEVLINK_ESWITCH_MODE_LEGACY:
f6455de0 2598 *mlx5_mode = MLX5_ESWITCH_LEGACY;
c930a3ad
OG
2599 break;
2600 case DEVLINK_ESWITCH_MODE_SWITCHDEV:
f6455de0 2601 *mlx5_mode = MLX5_ESWITCH_OFFLOADS;
c930a3ad
OG
2602 break;
2603 default:
2604 return -EINVAL;
2605 }
2606
2607 return 0;
2608}
2609
ef78618b
OG
2610static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
2611{
2612 switch (mlx5_mode) {
f6455de0 2613 case MLX5_ESWITCH_LEGACY:
ef78618b
OG
2614 *mode = DEVLINK_ESWITCH_MODE_LEGACY;
2615 break;
f6455de0 2616 case MLX5_ESWITCH_OFFLOADS:
ef78618b
OG
2617 *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
2618 break;
2619 default:
2620 return -EINVAL;
2621 }
2622
2623 return 0;
2624}
2625
bffaa916
RD
2626static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
2627{
2628 switch (mode) {
2629 case DEVLINK_ESWITCH_INLINE_MODE_NONE:
2630 *mlx5_mode = MLX5_INLINE_MODE_NONE;
2631 break;
2632 case DEVLINK_ESWITCH_INLINE_MODE_LINK:
2633 *mlx5_mode = MLX5_INLINE_MODE_L2;
2634 break;
2635 case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
2636 *mlx5_mode = MLX5_INLINE_MODE_IP;
2637 break;
2638 case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
2639 *mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
2640 break;
2641 default:
2642 return -EINVAL;
2643 }
2644
2645 return 0;
2646}
2647
2648static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
2649{
2650 switch (mlx5_mode) {
2651 case MLX5_INLINE_MODE_NONE:
2652 *mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
2653 break;
2654 case MLX5_INLINE_MODE_L2:
2655 *mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
2656 break;
2657 case MLX5_INLINE_MODE_IP:
2658 *mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
2659 break;
2660 case MLX5_INLINE_MODE_TCP_UDP:
2661 *mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
2662 break;
2663 default:
2664 return -EINVAL;
2665 }
2666
2667 return 0;
2668}
2669
ae24432c
PP
2670static int eswitch_devlink_esw_mode_check(const struct mlx5_eswitch *esw)
2671{
2672 /* devlink commands in NONE eswitch mode are currently supported only
2673 * on ECPF.
2674 */
2675 return (esw->mode == MLX5_ESWITCH_NONE &&
2676 !mlx5_core_is_ecpf_esw_manager(esw->dev)) ? -EOPNOTSUPP : 0;
2677}
2678
db7ff19e
EB
2679int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
2680 struct netlink_ext_ack *extack)
9d1cef19 2681{
9d1cef19 2682 u16 cur_mlx5_mode, mlx5_mode = 0;
bd939753 2683 struct mlx5_eswitch *esw;
ea2128fd 2684 int err = 0;
9d1cef19 2685
bd939753
PP
2686 esw = mlx5_devlink_eswitch_get(devlink);
2687 if (IS_ERR(esw))
2688 return PTR_ERR(esw);
9d1cef19 2689
ef78618b 2690 if (esw_mode_from_devlink(mode, &mlx5_mode))
c930a3ad
OG
2691 return -EINVAL;
2692
8e0aa4bc 2693 mutex_lock(&esw->mode_lock);
8e0aa4bc 2694 cur_mlx5_mode = esw->mode;
c930a3ad 2695 if (cur_mlx5_mode == mlx5_mode)
8e0aa4bc 2696 goto unlock;
c930a3ad
OG
2697
2698 if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
8e0aa4bc 2699 err = esw_offloads_start(esw, extack);
c930a3ad 2700 else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
8e0aa4bc 2701 err = esw_offloads_stop(esw, extack);
c930a3ad 2702 else
8e0aa4bc
PP
2703 err = -EINVAL;
2704
2705unlock:
2706 mutex_unlock(&esw->mode_lock);
2707 return err;
feae9087
OG
2708}
2709
2710int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
2711{
bd939753 2712 struct mlx5_eswitch *esw;
9d1cef19 2713 int err;
c930a3ad 2714
bd939753
PP
2715 esw = mlx5_devlink_eswitch_get(devlink);
2716 if (IS_ERR(esw))
2717 return PTR_ERR(esw);
c930a3ad 2718
8e0aa4bc 2719 mutex_lock(&esw->mode_lock);
bd939753 2720 err = eswitch_devlink_esw_mode_check(esw);
ae24432c 2721 if (err)
8e0aa4bc 2722 goto unlock;
ae24432c 2723
8e0aa4bc
PP
2724 err = esw_mode_to_devlink(esw->mode, mode);
2725unlock:
2726 mutex_unlock(&esw->mode_lock);
2727 return err;
feae9087 2728}
127ea380 2729
db7ff19e
EB
2730int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
2731 struct netlink_ext_ack *extack)
bffaa916
RD
2732{
2733 struct mlx5_core_dev *dev = devlink_priv(devlink);
db68cc56 2734 int err, vport, num_vport;
bd939753 2735 struct mlx5_eswitch *esw;
bffaa916
RD
2736 u8 mlx5_mode;
2737
bd939753
PP
2738 esw = mlx5_devlink_eswitch_get(devlink);
2739 if (IS_ERR(esw))
2740 return PTR_ERR(esw);
bffaa916 2741
8e0aa4bc 2742 mutex_lock(&esw->mode_lock);
ae24432c
PP
2743 err = eswitch_devlink_esw_mode_check(esw);
2744 if (err)
8e0aa4bc 2745 goto out;
ae24432c 2746
c415f704
OG
2747 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
2748 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
2749 if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
8e0aa4bc 2750 goto out;
c8b838d1 2751 fallthrough;
c415f704 2752 case MLX5_CAP_INLINE_MODE_L2:
8c98ee77 2753 NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
8e0aa4bc
PP
2754 err = -EOPNOTSUPP;
2755 goto out;
c415f704
OG
2756 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
2757 break;
2758 }
bffaa916 2759
525e84be 2760 if (atomic64_read(&esw->offloads.num_flows) > 0) {
8c98ee77
EB
2761 NL_SET_ERR_MSG_MOD(extack,
2762 "Can't set inline mode when flows are configured");
8e0aa4bc
PP
2763 err = -EOPNOTSUPP;
2764 goto out;
375f51e2
RD
2765 }
2766
bffaa916
RD
2767 err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
2768 if (err)
2769 goto out;
2770
411ec9e0 2771 mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
bffaa916
RD
2772 err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
2773 if (err) {
8c98ee77
EB
2774 NL_SET_ERR_MSG_MOD(extack,
2775 "Failed to set min inline on vport");
bffaa916
RD
2776 goto revert_inline_mode;
2777 }
2778 }
2779
2780 esw->offloads.inline_mode = mlx5_mode;
8e0aa4bc 2781 mutex_unlock(&esw->mode_lock);
bffaa916
RD
2782 return 0;
2783
2784revert_inline_mode:
db68cc56 2785 num_vport = --vport;
411ec9e0 2786 mlx5_esw_for_each_host_func_vport_reverse(esw, vport, num_vport)
bffaa916
RD
2787 mlx5_modify_nic_vport_min_inline(dev,
2788 vport,
2789 esw->offloads.inline_mode);
2790out:
8e0aa4bc 2791 mutex_unlock(&esw->mode_lock);
bffaa916
RD
2792 return err;
2793}
2794
2795int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
2796{
bd939753 2797 struct mlx5_eswitch *esw;
9d1cef19 2798 int err;
bffaa916 2799
bd939753
PP
2800 esw = mlx5_devlink_eswitch_get(devlink);
2801 if (IS_ERR(esw))
2802 return PTR_ERR(esw);
bffaa916 2803
8e0aa4bc 2804 mutex_lock(&esw->mode_lock);
ae24432c
PP
2805 err = eswitch_devlink_esw_mode_check(esw);
2806 if (err)
8e0aa4bc 2807 goto unlock;
ae24432c 2808
8e0aa4bc
PP
2809 err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
2810unlock:
2811 mutex_unlock(&esw->mode_lock);
2812 return err;
bffaa916
RD
2813}
2814
98fdbea5
LR
2815int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
2816 enum devlink_eswitch_encap_mode encap,
db7ff19e 2817 struct netlink_ext_ack *extack)
7768d197
RD
2818{
2819 struct mlx5_core_dev *dev = devlink_priv(devlink);
bd939753 2820 struct mlx5_eswitch *esw;
7768d197
RD
2821 int err;
2822
bd939753
PP
2823 esw = mlx5_devlink_eswitch_get(devlink);
2824 if (IS_ERR(esw))
2825 return PTR_ERR(esw);
7768d197 2826
8e0aa4bc 2827 mutex_lock(&esw->mode_lock);
ae24432c
PP
2828 err = eswitch_devlink_esw_mode_check(esw);
2829 if (err)
8e0aa4bc 2830 goto unlock;
ae24432c 2831
7768d197 2832 if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
60786f09 2833 (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
8e0aa4bc
PP
2834 !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) {
2835 err = -EOPNOTSUPP;
2836 goto unlock;
2837 }
7768d197 2838
8e0aa4bc
PP
2839 if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) {
2840 err = -EOPNOTSUPP;
2841 goto unlock;
2842 }
7768d197 2843
f6455de0 2844 if (esw->mode == MLX5_ESWITCH_LEGACY) {
7768d197 2845 esw->offloads.encap = encap;
8e0aa4bc 2846 goto unlock;
7768d197
RD
2847 }
2848
2849 if (esw->offloads.encap == encap)
8e0aa4bc 2850 goto unlock;
7768d197 2851
525e84be 2852 if (atomic64_read(&esw->offloads.num_flows) > 0) {
8c98ee77
EB
2853 NL_SET_ERR_MSG_MOD(extack,
2854 "Can't set encapsulation when flows are configured");
8e0aa4bc
PP
2855 err = -EOPNOTSUPP;
2856 goto unlock;
7768d197
RD
2857 }
2858
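/* Changing the encap mode requires recreating the fast path FDB
 * tables; on failure, revert to the previous mode and tables.
 */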
e52c2802 2859 esw_destroy_offloads_fdb_tables(esw);
7768d197
RD
2860
2861 esw->offloads.encap = encap;
e52c2802 2862
0da3c12d 2863 err = esw_create_offloads_fdb_tables(esw);
e52c2802 2864
7768d197 2865 if (err) {
8c98ee77
EB
2866 NL_SET_ERR_MSG_MOD(extack,
2867 "Failed re-creating fast FDB table");
7768d197 2868 esw->offloads.encap = !encap;
0da3c12d 2869 (void)esw_create_offloads_fdb_tables(esw);
7768d197 2870 }
e52c2802 2871
8e0aa4bc
PP
2872unlock:
2873 mutex_unlock(&esw->mode_lock);
7768d197
RD
2874 return err;
2875}
2876
98fdbea5
LR
2877int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
2878 enum devlink_eswitch_encap_mode *encap)
7768d197 2879{
bd939753 2880 struct mlx5_eswitch *esw;
9d1cef19 2881 int err;
7768d197 2882
bd939753
PP
2883 esw = mlx5_devlink_eswitch_get(devlink);
2884 if (IS_ERR(esw))
2885 return PTR_ERR(esw);
2886
8e0aa4bc 2888 mutex_lock(&esw->mode_lock);
ae24432c
PP
2889 err = eswitch_devlink_esw_mode_check(esw);
2890 if (err)
8e0aa4bc 2891 goto unlock;
ae24432c 2892
7768d197 2893 *encap = esw->offloads.encap;
8e0aa4bc
PP
2894unlock:
2895 mutex_unlock(&esw->mode_lock);
7768d197
RD
2896 return 0;
2897}
2898
c2d7712c
BW
2899static bool
2900mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
2901{
2902 /* Currently, only an ECPF-based device has a representor for the host PF. */
2903 if (vport_num == MLX5_VPORT_PF &&
2904 !mlx5_core_is_ecpf_esw_manager(esw->dev))
2905 return false;
2906
2907 if (vport_num == MLX5_VPORT_ECPF &&
2908 !mlx5_ecpf_vport_exists(esw->dev))
2909 return false;
2910
2911 return true;
2912}
2913
f8e8fa02 2914void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
8693115a 2915 const struct mlx5_eswitch_rep_ops *ops,
f8e8fa02 2916 u8 rep_type)
127ea380 2917{
8693115a 2918 struct mlx5_eswitch_rep_data *rep_data;
f8e8fa02
BW
2919 struct mlx5_eswitch_rep *rep;
2920 int i;
9deb2241 2921
8693115a 2922 esw->offloads.rep_ops[rep_type] = ops;
f8e8fa02 2923 mlx5_esw_for_all_reps(esw, i, rep) {
c2d7712c
BW
2924 if (likely(mlx5_eswitch_vport_has_rep(esw, i))) {
2925 rep_data = &rep->rep_data[rep_type];
2926 atomic_set(&rep_data->state, REP_REGISTERED);
2927 }
f8e8fa02 2928 }
127ea380 2929}
f8e8fa02 2930EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
127ea380 2931
f8e8fa02 2932void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
127ea380 2933{
cb67b832 2934 struct mlx5_eswitch_rep *rep;
f8e8fa02 2935 int i;
cb67b832 2936
f6455de0 2937 if (esw->mode == MLX5_ESWITCH_OFFLOADS)
062f4bf4 2938 __unload_reps_all_vport(esw, rep_type);
127ea380 2939
f8e8fa02 2940 mlx5_esw_for_all_reps(esw, i, rep)
8693115a 2941 atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
127ea380 2942}
f8e8fa02 2943EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
726293f1 2944
a4b97ab4 2945void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
726293f1 2946{
726293f1
HHZ
2947 struct mlx5_eswitch_rep *rep;
2948
879c8f84 2949 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
8693115a 2950 return rep->rep_data[rep_type].priv;
726293f1 2951}
22215908
MB
2952
2953void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
02f3afd9 2954 u16 vport,
22215908
MB
2955 u8 rep_type)
2956{
22215908
MB
2957 struct mlx5_eswitch_rep *rep;
2958
879c8f84 2959 rep = mlx5_eswitch_get_rep(esw, vport);
22215908 2960
8693115a
PP
2961 if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
2962 esw->offloads.rep_ops[rep_type]->get_proto_dev)
2963 return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
22215908
MB
2964 return NULL;
2965}
57cbd893 2966EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);
22215908
MB
2967
2968void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
2969{
879c8f84 2970 return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
22215908 2971}
57cbd893
MB
2972EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);
2973
2974struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
02f3afd9 2975 u16 vport)
57cbd893 2976{
879c8f84 2977 return mlx5_eswitch_get_rep(esw, vport);
57cbd893
MB
2978}
2979EXPORT_SYMBOL(mlx5_eswitch_vport_rep);
91d6291c
PP
2980
2981bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
2982{
2983 return vport_num >= MLX5_VPORT_FIRST_VF &&
2984 vport_num <= esw->dev->priv.sriov.max_vfs;
2985}
7445cfb1 2986
5b7cb745
PB
2987bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
2988{
2989 return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED);
2990}
2991EXPORT_SYMBOL(mlx5_eswitch_reg_c1_loopback_enabled);
2992
7445cfb1
JL
2993bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
2994{
2995 return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
2996}
2997EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);
2998
0f0d3827 2999u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
7445cfb1
JL
3000 u16 vport_num)
3001{
133dcfc5 3002 struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
0f0d3827 3003
133dcfc5
VP
3004 if (WARN_ON_ONCE(IS_ERR(vport)))
3005 return 0;
0f0d3827 3006
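/* The source port metadata occupies the high bits of reg_c_0;
 * shift it into match position.
 */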
133dcfc5 3007 return vport->metadata << (32 - ESW_SOURCE_PORT_METADATA_BITS);
7445cfb1
JL
3008}
3009EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
d970812b
PP
3010
3011int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
3012 u16 vport_num, u32 sfnum)
3013{
3014 int err;
3015
3016 err = mlx5_esw_vport_enable(esw, vport_num, MLX5_VPORT_UC_ADDR_CHANGE);
3017 if (err)
3018 return err;
3019
3020 err = mlx5_esw_devlink_sf_port_register(esw, dl_port, vport_num, sfnum);
3021 if (err)
3022 goto devlink_err;
3023
3024 err = mlx5_esw_offloads_rep_load(esw, vport_num);
3025 if (err)
3026 goto rep_err;
3027 return 0;
3028
3029rep_err:
3030 mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
3031devlink_err:
3032 mlx5_esw_vport_disable(esw, vport_num);
3033 return err;
3034}
3035
3036void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
3037{
3038 mlx5_esw_offloads_rep_unload(esw, vport_num);
3039 mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
3040 mlx5_esw_vport_disable(esw, vport_num);
3041}
84ae9c1f
VB
3042
3043static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num, u16 *vhca_id)
3044{
3045 int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
3046 void *query_ctx;
3047 void *hca_caps;
3048 int err;
3049
3050 *vhca_id = 0;
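/* Only non-manager vports can be queried, and only when this
 * function is the vhca resource manager.
 */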
3051 if (mlx5_esw_is_manager_vport(esw, vport_num) ||
3052 !MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
3053 return -EPERM;
3054
3055 query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
3056 if (!query_ctx)
3057 return -ENOMEM;
3058
3059 err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx);
3060 if (err)
3061 goto out_free;
3062
3063 hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
3064 *vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id);
3065
3066out_free:
3067 kfree(query_ctx);
3068 return err;
3069}
3070
3071int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num)
3072{
3073 u16 *old_entry, *vhca_map_entry, vhca_id;
3074 int err;
3075
3076 err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
3077 if (err) {
3078 esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%u,err=%d)\n",
3079 vport_num, err);
3080 return err;
3081 }
3082
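/* Record the vhca_id -> vport_num mapping so that
 * mlx5_eswitch_vhca_id_to_vport() can resolve it later.
 */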
3083 vhca_map_entry = kmalloc(sizeof(*vhca_map_entry), GFP_KERNEL);
3084 if (!vhca_map_entry)
3085 return -ENOMEM;
3086
3087 *vhca_map_entry = vport_num;
3088 old_entry = xa_store(&esw->offloads.vhca_map, vhca_id, vhca_map_entry, GFP_KERNEL);
3089 if (xa_is_err(old_entry)) {
3090 kfree(vhca_map_entry);
3091 return xa_err(old_entry);
3092 }
3093 kfree(old_entry);
3094 return 0;
3095}
3096
3097void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num)
3098{
3099 u16 *vhca_map_entry, vhca_id;
3100 int err;
3101
3102 err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
3103 if (err)
3104 esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%hu,err=%d)\n",
3105 vport_num, err);
3106
3107 vhca_map_entry = xa_erase(&esw->offloads.vhca_map, vhca_id);
3108 kfree(vhca_map_entry);
3109}
3110
3111int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num)
3112{
3113 u16 *res = xa_load(&esw->offloads.vhca_map, vhca_id);
3114
3115 if (!res)
3116 return -ENOENT;
3117
3118 *vport_num = *res;
3119 return 0;
3120}
10742efc
VB
3121
3122u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
3123 u16 vport_num)
3124{
3125 struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
3126
3127 if (WARN_ON_ONCE(IS_ERR(vport)))
3128 return 0;
3129
3130 return vport->metadata;
3131}
3132EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_set);