/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "eswitch_offloads_chains.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
#include "lib/eq.h"

/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)
#define UPLINK_REP_INDEX 0

static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
						     u16 vport_num)
{
	int idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);

	WARN_ON(idx > esw->total_vports - 1);
	return &esw->offloads.vport_reps[idx];
}

static bool
esw_check_ingress_prio_tag_enabled(const struct mlx5_eswitch *esw,
				   const struct mlx5_vport *vport)
{
	return (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
		mlx5_eswitch_is_vf_vport(esw, vport->vport));
}

static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_esw_flow_attr *attr)
{
	void *misc2;
	void *misc;

	/* Use metadata matching because vport is not represented by single
	 * VHCA in dual-port RoCE mode, and matching on source vport may fail.
	 */
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(attr->in_mdev->priv.eswitch,
								   attr->in_rep->vport));

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET_TO_ONES(fte_match_set_misc2, misc2, metadata_reg_c_0);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		if (memchr_inv(misc, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc)))
			spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id,
				 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET_TO_ONES(fte_match_set_misc, misc,
					 source_eswitch_owner_vhca_id);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	}

	if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
	    attr->in_rep->vport == MLX5_VPORT_UPLINK)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
}

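/* Translate an offload attr into an FDB rule: build the destination
 * list (slow path table, goto-chain table, or vport forwards with
 * optional encap), append a flow counter if requested, set the source
 * port match, and insert the rule into the table of the attr's
 * (chain, prio), going via a termination table when the hardware
 * requires one.
 */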
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	bool split = !!(attr->split_count);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *fdb;
	int j, i = 0;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	flow_act.action = attr->action;
	/* if per flow vlan pop/push is emulated, don't set that into the firmware */
	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
				     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
		flow_act.vlan[0].vid = attr->vlan_vid[0];
		flow_act.vlan[0].prio = attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
			flow_act.vlan[1].vid = attr->vlan_vid[1];
			flow_act.vlan[1].prio = attr->vlan_prio[1];
		}
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		struct mlx5_flow_table *ft;

		if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
			flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[i].ft = mlx5_esw_chains_get_tc_end_ft(esw);
			i++;
		} else if (attr->dest_chain) {
			flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
			ft = mlx5_esw_chains_get_table(esw, attr->dest_chain,
						       1, 0);
			if (IS_ERR(ft)) {
				rule = ERR_CAST(ft);
				goto err_create_goto_table;
			}

			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[i].ft = ft;
			i++;
		} else {
			for (j = attr->split_count; j < attr->out_count; j++) {
				dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
				dest[i].vport.num = attr->dests[j].rep->vport;
				dest[i].vport.vhca_id =
					MLX5_CAP_GEN(attr->dests[j].mdev, vhca_id);
				if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
					dest[i].vport.flags |=
						MLX5_FLOW_DEST_VPORT_VHCA_ID;
				if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
					flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
					flow_act.pkt_reformat = attr->dests[j].pkt_reformat;
					dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
					dest[i].vport.pkt_reformat =
						attr->dests[j].pkt_reformat;
				}
				i++;
			}
		}
	}
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter_id = mlx5_fc_id(attr->counter);
		i++;
	}

	mlx5_eswitch_set_rule_source_port(esw, spec, attr);

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	if (attr->inner_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

	fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio,
					!!split);
	if (IS_ERR(fdb)) {
		rule = ERR_CAST(fdb);
		goto err_esw_get;
	}

	if (mlx5_eswitch_termtbl_required(esw, &flow_act, spec))
		rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr,
						     &flow_act, dest, i);
	else
		rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		atomic64_inc(&esw->offloads.num_flows);

	return rule;

err_add_rule:
	mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, !!split);
err_esw_get:
	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) && attr->dest_chain)
		mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
err_create_goto_table:
	return rule;
}

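/* Add the first stage of a "split" rule: match in the (chain, prio)
 * fast table, mirror to the first attr->split_count vport destinations
 * and then jump to the level-1 forwarding table, where the second
 * stage of the rule is installed.
 */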
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *fast_fdb;
	struct mlx5_flow_table *fwd_fdb;
	struct mlx5_flow_handle *rule;
	int i;

	fast_fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio, 0);
	if (IS_ERR(fast_fdb)) {
		rule = ERR_CAST(fast_fdb);
		goto err_get_fast;
	}

	fwd_fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio, 1);
	if (IS_ERR(fwd_fdb)) {
		rule = ERR_CAST(fwd_fdb);
		goto err_get_fwd;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < attr->split_count; i++) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport.num = attr->dests[i].rep->vport;
		dest[i].vport.vhca_id =
			MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
		if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
			dest[i].vport.pkt_reformat = attr->dests[i].pkt_reformat;
		}
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = fwd_fdb;
	i++;

	mlx5_eswitch_set_rule_source_port(esw, spec, attr);

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);

	if (IS_ERR(rule))
		goto add_err;

	atomic64_inc(&esw->offloads.num_flows);

	return rule;
add_err:
	mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 1);
err_get_fwd:
	mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
err_get_fast:
	return rule;
}

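/* Common teardown for offloaded and fwd rules: delete the flow rule,
 * release any termination tables referenced by the destinations and
 * drop the references taken on the chain/prio tables at add time.
 */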
static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
			struct mlx5_flow_handle *rule,
			struct mlx5_esw_flow_attr *attr,
			bool fwd_rule)
{
	bool split = (attr->split_count > 0);
	int i;

	mlx5_del_flow_rules(rule);

	/* unref the term table */
	for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
		if (attr->dests[i].termtbl)
			mlx5_eswitch_termtbl_put(esw, attr->dests[i].termtbl);
	}

	atomic64_dec(&esw->offloads.num_flows);

	if (fwd_rule) {
		mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 1);
		mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
	} else {
		mlx5_esw_chains_put_table(esw, attr->chain, attr->prio,
					  !!split);
		if (attr->dest_chain)
			mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
	}
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, false);
}

void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, true);
}

static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int i, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	mlx5_esw_for_each_host_func_rep(esw, i, rep, esw->esw_funcs.num_vfs) {
		if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

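/* Validate an emulated vlan push/pop request: push/pop only makes
 * sense together with a forward action, pushing on the uplink or
 * popping towards the uplink is rejected, and a vport may only have
 * one vlan pushed at a time.
 */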
static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push && in_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules w.o it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}

int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
		 !attr->dest_chain);

	mutex_lock(&esw->state_lock);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		goto unlock;

	attr->flags &= ~MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
			vport->vlan_refcount++;
			attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
		}

		goto unlock;
	}

	if (!push && !pop)
		goto unlock;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan_vid[0];
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_VLAN_HANDLED))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	mutex_lock(&esw->state_lock);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
			vport->vlan_refcount--;

		goto out;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		goto out;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	mutex_unlock(&esw->state_lock);
	return err;
}

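/* Steer packets that a representor transmits on send queue @sqn from
 * the eswitch manager vport straight to @vport, bypassing the normal
 * FDB lookup; e.g. this is how rep netdev traffic reaches its VF.
 */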
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, u16 vport,
				    u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	/* source vport is the esw manager */
	MLX5_SET(fte_match_set_misc, misc, source_port, esw->manager_vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

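/* Toggle the MLX5_FDB_TO_VPORT_REG_C_0 bit in the esw vport context so
 * that the metadata_reg_c_0 value written by the ingress ACL is passed
 * on from the FDB with the packet. No-op unless vport metadata
 * matching is enabled.
 */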
static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
	u8 fdb_to_vport_reg_c_id;
	int err;

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
		return 0;

	err = mlx5_eswitch_query_esw_vport_context(esw->dev, 0, false,
						   out, sizeof(out));
	if (err)
		return err;

	fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out,
					 esw_vport_context.fdb_to_vport_reg_c_id);

	if (enable)
		fdb_to_vport_reg_c_id |= MLX5_FDB_TO_VPORT_REG_C_0;
	else
		fdb_to_vport_reg_c_id &= ~MLX5_FDB_TO_VPORT_REG_C_0;

	MLX5_SET(modify_esw_vport_context_in, in,
		 esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id);

	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.fdb_to_vport_reg_c_id, 1);

	return mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false,
						     in, sizeof(in));
}

static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
				  struct mlx5_core_dev *peer_dev,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_destination *dest)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters_2);
		MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

		MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(peer_dev, vhca_id));

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);
	}

	dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest->vport.num = peer_dev->priv.eswitch->manager_vport;
	dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
	dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}

static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
					       struct mlx5_eswitch *peer_esw,
					       struct mlx5_flow_spec *spec,
					       u16 vport)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(peer_esw,
								   vport));
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);
	}
}

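/* For the merged eswitch case: add one slow path rule per vport of the
 * peer device, matching packets that entered through that peer vport
 * and forwarding them to the peer's eswitch manager so the peer FDB
 * can deliver them. PF/ECPF vports are handled before the VF range.
 */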
static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
				       struct mlx5_core_dev *peer_dev)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle **flows;
	struct mlx5_flow_handle *flow;
	struct mlx5_flow_spec *spec;
	/* total vports is the same for both e-switches */
	int nvports = esw->total_vports;
	void *misc;
	int err, i;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	peer_miss_rules_setup(esw, peer_dev, spec, &dest);

	flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
	if (!flows) {
		err = -ENOMEM;
		goto alloc_flows_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
						   spec, MLX5_VPORT_PF);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_pf_flow_err;
		}
		flows[MLX5_VPORT_PF] = flow;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_ecpf_flow_err;
		}
		flows[mlx5_eswitch_ecpf_idx(esw)] = flow;
	}

	mlx5_esw_for_each_vf_vport_num(esw, i, mlx5_core_max_vfs(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw,
						   peer_dev->priv.eswitch,
						   spec, i);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_vf_flow_err;
		}
		flows[i] = flow;
	}

	esw->fdb_table.offloads.peer_miss_rules = flows;

	kvfree(spec);
	return 0;

add_vf_flow_err:
	nvports = --i;
	mlx5_esw_for_each_vf_vport_num_reverse(esw, i, nvports)
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
add_ecpf_flow_err:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);
add_pf_flow_err:
	esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
	kvfree(flows);
alloc_flows_err:
	kvfree(spec);
	return err;
}

static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows;
	int i;

	flows = esw->fdb_table.offloads.peer_miss_rules;

	mlx5_esw_for_each_vf_vport_num_reverse(esw, i,
					       mlx5_core_max_vfs(esw->dev))
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);

	kvfree(flows);
}

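/* Install the two catch-all miss rules of the slow path FDB (see
 * MLX5_ESW_MISS_FLOWS): packets that match nothing else are forwarded
 * to the eswitch manager vport, one rule for unicast dmac and one for
 * multicast (dmac multicast bit set).
 */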
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = esw->manager_vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}

#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
					   u32 *flow_group_in)
{
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    flow_group_in,
					    match_criteria);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS_2);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters_2.metadata_reg_c_0);
	} else {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_port);
	}
}

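/* Create the slow path FDB table and its flow groups: a send-to-vport
 * group (rep SQ -> vport rules), a peer miss group for the merged
 * eswitch case and the dmac-based miss group, then install the miss
 * rules. The tc chain/prio tables are created on top of this via the
 * eswitch chains API.
 */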
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	u32 flags = 0, *flow_group_in;
	int table_size, ix, err = 0;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u8 *dmac;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}
	esw->fdb_table.offloads.ns = root_ns;
	err = mlx5_flow_namespace_set_mode(root_ns,
					   esw->dev->priv.steering->mode);
	if (err) {
		esw_warn(dev, "Failed to set FDB namespace steering mode\n");
		goto ns_err;
	}

	table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ +
		MLX5_ESW_MISS_FLOWS + esw->total_vports;

	/* create the slow path fdb with encap set, so further table instances
	 * can be created at run time while VFs are probed if the FW allows that.
	 */
	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	ft_attr.flags = flags;
	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

	err = mlx5_esw_chains_create(esw);
	if (err) {
		esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
		goto fdb_chains_err;
	}

	/* create send-to-vport group */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create peer esw miss group */
	memset(flow_group_in, 0, inlen);

	esw_set_flow_group_source_port(esw, flow_group_in);

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					      flow_group_in,
					      match_criteria);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_eswitch_owner_vhca_id);

		MLX5_SET(create_flow_group_in, flow_group_in,
			 source_eswitch_owner_vhca_id_valid, 1);
	}

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + esw->total_vports - 1);
	ix += esw->total_vports;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
		goto peer_miss_err;
	}
	esw->fdb_table.offloads.peer_miss_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + MLX5_ESW_MISS_FLOWS);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	esw->nvports = nvports;
	kvfree(flow_group_in);
	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	mlx5_esw_chains_destroy(esw);
fdb_chains_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS);
ns_err:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.offloads.slow_fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_esw_chains_destroy(esw);
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
				     MLX5_FLOW_STEERING_MODE_DMFS);
}

static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = nvports + MLX5_ESW_MISS_FLOWS;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}

static int esw_create_vport_rx_group(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int err = 0;

	nvports = nvports + MLX5_ESW_MISS_FLOWS;
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	esw_set_flow_group_source_port(esw, flow_group_in);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

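/* Add a rule in the offloads RX table steering @vport's traffic to
 * @dest (typically the rep's TIR): match on the vport's metadata value
 * when metadata matching is enabled, on misc source_port otherwise.
 */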
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(esw, vport));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}

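/* devlink handler for switching the eswitch from legacy to offloads
 * mode: disable the eswitch, re-enable it in offloads mode, fall back
 * to legacy on failure, and fix up the inline mode, reporting errors
 * through extack.
 */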
static int esw_offloads_start(struct mlx5_eswitch *esw,
			      struct netlink_ext_ack *extack)
{
	int err, err1;

	if (esw->mode != MLX5_ESWITCH_LEGACY &&
	    !mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set offloads mode, SRIOV legacy not enabled");
		return -EINVAL;
	}

	mlx5_eswitch_disable(esw, true);
	mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs);
	err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed setting eswitch to offloads");
		err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to legacy");
		}
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			NL_SET_ERR_MSG_MOD(extack,
					   "Inline mode is different between vports");
		}
	}
	return err;
}

void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	kfree(esw->offloads.vport_reps);
}

int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	int total_vports = esw->total_vports;
	struct mlx5_eswitch_rep *rep;
	int vport_index;
	u8 rep_type;

	esw->offloads.vport_reps = kcalloc(total_vports,
					   sizeof(struct mlx5_eswitch_rep),
					   GFP_KERNEL);
	if (!esw->offloads.vport_reps)
		return -ENOMEM;

	mlx5_esw_for_all_reps(esw, vport_index, rep) {
		rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport_index);
		rep->vport_index = vport_index;

		for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
			atomic_set(&rep->rep_data[rep_type].state,
				   REP_UNREGISTERED);
	}

	return 0;
}

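/* Representor load/unload helpers. A rep's per-type state moves
 * between REP_REGISTERED and REP_LOADED via atomic_cmpxchg so load and
 * unload run at most once per rep, and the uplink/PF/ECPF "special"
 * vports are loaded before and unloaded after the VF range.
 */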
static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
				      struct mlx5_eswitch_rep *rep, u8 rep_type)
{
	if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
			   REP_LOADED, REP_REGISTERED) == REP_LOADED)
		esw->offloads.rep_ops[rep_type]->unload(rep);
}

static void __unload_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	__esw_offloads_unload_rep(esw, rep, rep_type);
}

static void __unload_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
				   u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int i;

	mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvports)
		__esw_offloads_unload_rep(esw, rep, rep_type);
}

static void esw_offloads_unload_vf_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = NUM_REP_TYPES;

	while (rep_type-- > 0)
		__unload_reps_vf_vport(esw, nvports, rep_type);
}

static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	__unload_reps_vf_vport(esw, esw->esw_funcs.num_vfs, rep_type);

	/* Special vports must be the last to unload. */
	__unload_reps_special_vport(esw, rep_type);
}

static void esw_offloads_unload_all_reps(struct mlx5_eswitch *esw)
{
	u8 rep_type = NUM_REP_TYPES;

	while (rep_type-- > 0)
		__unload_reps_all_vport(esw, rep_type);
}

static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
				   struct mlx5_eswitch_rep *rep, u8 rep_type)
{
	int err = 0;

	if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
			   REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
		err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
		if (err)
			atomic_set(&rep->rep_data[rep_type].state,
				   REP_REGISTERED);
	}

	return err;
}

static int __load_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int err;

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	err = __esw_offloads_load_rep(esw, rep, rep_type);
	if (err)
		return err;

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		err = __esw_offloads_load_rep(esw, rep, rep_type);
		if (err)
			goto err_pf;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
		err = __esw_offloads_load_rep(esw, rep, rep_type);
		if (err)
			goto err_ecpf;
	}

	return 0;

err_ecpf:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

err_pf:
	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	__esw_offloads_unload_rep(esw, rep, rep_type);
	return err;
}

static int __load_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
				u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int err, i;

	mlx5_esw_for_each_vf_rep(esw, i, rep, nvports) {
		err = __esw_offloads_load_rep(esw, rep, rep_type);
		if (err)
			goto err_vf;
	}

	return 0;

err_vf:
	__unload_reps_vf_vport(esw, --i, rep_type);
	return err;
}

static int __load_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	int err;

	/* Special vports must be loaded first, uplink rep creates mdev resource. */
	err = __load_reps_special_vport(esw, rep_type);
	if (err)
		return err;

	err = __load_reps_vf_vport(esw, esw->esw_funcs.num_vfs, rep_type);
	if (err)
		goto err_vfs;

	return 0;

err_vfs:
	__unload_reps_special_vport(esw, rep_type);
	return err;
}

static int esw_offloads_load_vf_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = 0;
	int err;

	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
		err = __load_reps_vf_vport(esw, nvports, rep_type);
		if (err)
			goto err_reps;
	}

	return err;

err_reps:
	while (rep_type-- > 0)
		__unload_reps_vf_vport(esw, nvports, rep_type);
	return err;
}

static int esw_offloads_load_all_reps(struct mlx5_eswitch *esw)
{
	u8 rep_type = 0;
	int err;

	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
		err = __load_reps_all_vport(esw, rep_type);
		if (err)
			goto err_reps;
	}

	return err;

err_reps:
	while (rep_type-- > 0)
		__unload_reps_all_vport(esw, rep_type);
	return err;
}

1418
ac004b83
RD
1419#define ESW_OFFLOADS_DEVCOM_PAIR (0)
1420#define ESW_OFFLOADS_DEVCOM_UNPAIR (1)
1421
1422static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
1423 struct mlx5_eswitch *peer_esw)
1424{
1425 int err;
1426
1427 err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
1428 if (err)
1429 return err;
1430
1431 return 0;
1432}
1433
1434static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
1435{
04de7dda 1436 mlx5e_tc_clean_fdb_peer_flows(esw);
ac004b83
RD
1437 esw_del_fdb_peer_miss_rules(esw);
1438}
1439
8463daf1
MG
1440static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
1441 struct mlx5_eswitch *peer_esw,
1442 bool pair)
1443{
1444 struct mlx5_flow_root_namespace *peer_ns;
1445 struct mlx5_flow_root_namespace *ns;
1446 int err;
1447
1448 peer_ns = peer_esw->dev->priv.steering->fdb_root_ns;
1449 ns = esw->dev->priv.steering->fdb_root_ns;
1450
1451 if (pair) {
1452 err = mlx5_flow_namespace_set_peer(ns, peer_ns);
1453 if (err)
1454 return err;
1455
e53e6655 1456 err = mlx5_flow_namespace_set_peer(peer_ns, ns);
8463daf1
MG
1457 if (err) {
1458 mlx5_flow_namespace_set_peer(ns, NULL);
1459 return err;
1460 }
1461 } else {
1462 mlx5_flow_namespace_set_peer(ns, NULL);
1463 mlx5_flow_namespace_set_peer(peer_ns, NULL);
1464 }
1465
1466 return 0;
1467}
1468
ac004b83
RD
1469static int mlx5_esw_offloads_devcom_event(int event,
1470 void *my_data,
1471 void *event_data)
1472{
1473 struct mlx5_eswitch *esw = my_data;
ac004b83 1474 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
8463daf1 1475 struct mlx5_eswitch *peer_esw = event_data;
ac004b83
RD
1476 int err;
1477
1478 switch (event) {
1479 case ESW_OFFLOADS_DEVCOM_PAIR:
a5641cb5
JL
1480 if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
1481 mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
1482 break;
1483
8463daf1 1484 err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
ac004b83
RD
1485 if (err)
1486 goto err_out;
8463daf1
MG
1487 err = mlx5_esw_offloads_pair(esw, peer_esw);
1488 if (err)
1489 goto err_peer;
ac004b83
RD
1490
1491 err = mlx5_esw_offloads_pair(peer_esw, esw);
1492 if (err)
1493 goto err_pair;
1494
1495 mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
1496 break;
1497
1498 case ESW_OFFLOADS_DEVCOM_UNPAIR:
1499 if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
1500 break;
1501
1502 mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
1503 mlx5_esw_offloads_unpair(peer_esw);
1504 mlx5_esw_offloads_unpair(esw);
8463daf1 1505 mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
ac004b83
RD
1506 break;
1507 }
1508
1509 return 0;
1510
1511err_pair:
1512 mlx5_esw_offloads_unpair(esw);
8463daf1
MG
1513err_peer:
1514 mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
ac004b83
RD
1515err_out:
1516 mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
1517 event, err);
1518 return err;
1519}
1520
1521static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
1522{
1523 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
1524
04de7dda
RD
1525 INIT_LIST_HEAD(&esw->offloads.peer_flows);
1526 mutex_init(&esw->offloads.peer_mutex);
1527
ac004b83
RD
1528 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
1529 return;
1530
1531 mlx5_devcom_register_component(devcom,
1532 MLX5_DEVCOM_ESW_OFFLOADS,
1533 mlx5_esw_offloads_devcom_event,
1534 esw);
1535
1536 mlx5_devcom_send_event(devcom,
1537 MLX5_DEVCOM_ESW_OFFLOADS,
1538 ESW_OFFLOADS_DEVCOM_PAIR, esw);
1539}
1540
1541static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
1542{
1543 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
1544
1545 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
1546 return;
1547
1548 mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
1549 ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
1550
1551 mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
1552}
1553
static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
					     struct mlx5_vport *vport)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_spec *spec;
	int err = 0;

	/* For prio tag mode, there is only one FTE:
	 * 1) Untagged packets - push prio tag VLAN and modify metadata if
	 * required, allow
	 * Unmatched traffic is allowed by default
	 */
	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* Untagged packets - push prio tag VLAN, allow */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 0);
	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
			  MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	flow_act.vlan[0].ethtype = ETH_P_8021Q;
	flow_act.vlan[0].vid = 0;
	flow_act.vlan[0].prio = 0;

	if (vport->ingress.offloads.modify_metadata_rule) {
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
		flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
	}

	vport->ingress.allow_rule =
		mlx5_add_flow_rules(vport->ingress.acl, spec,
				    &flow_act, NULL, 0);
	if (IS_ERR(vport->ingress.allow_rule)) {
		err = PTR_ERR(vport->ingress.allow_rule);
		esw_warn(esw->dev,
			 "vport[%d] configure ingress untagged allow rule, err(%d)\n",
			 vport->vport, err);
		vport->ingress.allow_rule = NULL;
	}

	kvfree(spec);
	return err;
}

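/* Attach a modify-header action on the vport ingress ACL that writes
 * this vport's match id into metadata_reg_c_0 on every packet, making
 * the source vport identifiable throughout the steering pipeline.
 */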
static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
						     struct mlx5_vport *vport)
{
	u8 action[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {};
	static const struct mlx5_flow_spec spec = {};
	struct mlx5_flow_act flow_act = {};
	int err = 0;

	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
	MLX5_SET(set_action_in, action, data,
		 mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport));

	vport->ingress.offloads.modify_metadata =
		mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
					 1, action);
	if (IS_ERR(vport->ingress.offloads.modify_metadata)) {
		err = PTR_ERR(vport->ingress.offloads.modify_metadata);
		esw_warn(esw->dev,
			 "failed to alloc modify header for vport %d ingress acl (%d)\n",
			 vport->vport, err);
		return err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
	vport->ingress.offloads.modify_metadata_rule =
		mlx5_add_flow_rules(vport->ingress.acl,
				    &spec, &flow_act, NULL, 0);
	if (IS_ERR(vport->ingress.offloads.modify_metadata_rule)) {
		err = PTR_ERR(vport->ingress.offloads.modify_metadata_rule);
		esw_warn(esw->dev,
			 "failed to add setting metadata rule for vport %d ingress acl, err(%d)\n",
			 vport->vport, err);
		mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
		vport->ingress.offloads.modify_metadata_rule = NULL;
	}
	return err;
}

static void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
						      struct mlx5_vport *vport)
{
	if (vport->ingress.offloads.modify_metadata_rule) {
		mlx5_del_flow_rules(vport->ingress.offloads.modify_metadata_rule);
		mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);

		vport->ingress.offloads.modify_metadata_rule = NULL;
	}
}

static int esw_vport_create_ingress_acl_group(struct mlx5_eswitch *esw,
					      struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;
	u32 *flow_group_in;
	u32 flow_index = 0;
	int ret = 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	if (esw_check_ingress_prio_tag_enabled(esw, vport)) {
		/* This group holds the FTE that matches untagged packets
		 * when prio_tag is enabled.
		 */
		memset(flow_group_in, 0, inlen);

		match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					      flow_group_in, match_criteria);
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
		MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);

		g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
		if (IS_ERR(g)) {
			ret = PTR_ERR(g);
			esw_warn(esw->dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
				 vport->vport, ret);
			goto prio_tag_err;
		}
		vport->ingress.offloads.metadata_prio_tag_grp = g;
		flow_index++;
	}

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		/* This group holds a match-all FTE that adds metadata. It
		 * covers tagged packets as a fallthrough when prio-tag is
		 * enabled, or all traffic when prio-tag is disabled.
		 */
		memset(flow_group_in, 0, inlen);
		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);

		g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
		if (IS_ERR(g)) {
			ret = PTR_ERR(g);
			esw_warn(esw->dev, "vport[%d] ingress create metadata flow group, err(%d)\n",
				 vport->vport, ret);
			goto metadata_err;
		}
		vport->ingress.offloads.metadata_allmatch_grp = g;
	}

	kvfree(flow_group_in);
	return 0;

metadata_err:
	if (!IS_ERR_OR_NULL(vport->ingress.offloads.metadata_prio_tag_grp)) {
		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
		vport->ingress.offloads.metadata_prio_tag_grp = NULL;
	}
prio_tag_err:
	kvfree(flow_group_in);
	return ret;
}

static void esw_vport_destroy_ingress_acl_group(struct mlx5_vport *vport)
{
	if (vport->ingress.offloads.metadata_allmatch_grp) {
		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_allmatch_grp);
		vport->ingress.offloads.metadata_allmatch_grp = NULL;
	}

	if (vport->ingress.offloads.metadata_prio_tag_grp) {
		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
		vport->ingress.offloads.metadata_prio_tag_grp = NULL;
	}
}

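/* Set up the vport ingress ACL: size the table for at most two FTEs
 * (metadata set and prio-tag push), then create the table, the flow
 * groups, and the rules. On failure, unwind in reverse order.
 */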
static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport)
{
	int num_ftes = 0;
	int err;

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
	    !esw_check_ingress_prio_tag_enabled(esw, vport))
		return 0;

	esw_vport_cleanup_ingress_rules(esw, vport);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw))
		num_ftes++;
	if (esw_check_ingress_prio_tag_enabled(esw, vport))
		num_ftes++;

	err = esw_vport_create_ingress_acl_table(esw, vport, num_ftes);
	if (err) {
		esw_warn(esw->dev,
			 "failed to enable ingress acl (%d) on vport[%d]\n",
			 err, vport->vport);
		return err;
	}

	err = esw_vport_create_ingress_acl_group(esw, vport);
	if (err)
		goto group_err;

	esw_debug(esw->dev,
		  "vport[%d] configure ingress rules\n", vport->vport);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		err = esw_vport_add_ingress_acl_modify_metadata(esw, vport);
		if (err)
			goto metadata_err;
	}

	if (esw_check_ingress_prio_tag_enabled(esw, vport)) {
		err = esw_vport_ingress_prio_tag_config(esw, vport);
		if (err)
			goto prio_tag_err;
	}
	return 0;

prio_tag_err:
	esw_vport_del_ingress_acl_modify_metadata(esw, vport);
metadata_err:
	esw_vport_destroy_ingress_acl_group(vport);
group_err:
	esw_vport_destroy_ingress_acl_table(vport);
	return err;
}

static int esw_vport_egress_config(struct mlx5_eswitch *esw,
				   struct mlx5_vport *vport)
{
	int err;

	if (!MLX5_CAP_GEN(esw->dev, prio_tag_required))
		return 0;

	esw_vport_cleanup_egress_rules(esw, vport);

	err = esw_vport_enable_egress_acl(esw, vport);
	if (err)
		return err;

	/* For prio tag mode, there is only one FTE:
	 * 1) prio tag packets - pop the prio tag VLAN, allow
	 * Unmatched traffic is allowed by default.
	 */
	esw_debug(esw->dev,
		  "vport[%d] configure prio tag egress rules\n", vport->vport);

	/* prio tag vlan rule - pop it so VF receives untagged packets */
	err = mlx5_esw_create_vport_egress_acl_vlan(esw, vport, 0,
						    MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
						    MLX5_FLOW_CONTEXT_ACTION_ALLOW);
	if (err)
		esw_vport_disable_egress_acl(esw, vport);

	return err;
}

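/* Matching on vport metadata requires an ingress ACL on the uplink, the
 * ability to copy REG_C_0 from the FDB to the vport, and the flow_source
 * capability. It is disabled when an external host PF (ECPF) manages
 * the eswitch.
 */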
static bool
esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
{
	if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
		return false;

	if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	      MLX5_FDB_TO_VPORT_REG_C_0))
		return false;

	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
		return false;

	if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
	    mlx5_ecpf_vport_exists(esw->dev))
		return false;

	return true;
}

int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport)
{
	int err;

	err = esw_vport_ingress_config(esw, vport);
	if (err)
		return err;

	if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
		err = esw_vport_egress_config(esw, vport);
		if (err) {
			esw_vport_cleanup_ingress_rules(esw, vport);
			esw_vport_del_ingress_acl_modify_metadata(esw, vport);
			esw_vport_destroy_ingress_acl_group(vport);
			esw_vport_destroy_ingress_acl_table(vport);
		}
	}
	return err;
}

void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport)
{
	esw_vport_disable_egress_acl(esw, vport);
	esw_vport_cleanup_ingress_rules(esw, vport);
	esw_vport_del_ingress_acl_modify_metadata(esw, vport);
	esw_vport_destroy_ingress_acl_group(vport);
	esw_vport_destroy_ingress_acl_table(vport);
}

static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	int err;

	if (esw_check_vport_match_metadata_supported(esw))
		esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;

	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
	err = esw_vport_create_offloads_acl_tables(esw, vport);
	if (err)
		esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
	return err;
}

static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
	esw_vport_destroy_offloads_acl_tables(esw, vport);
	esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
}

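/* Create all offloads steering objects: the uplink ACL tables, the
 * fast-path FDB tables, the offloads table and the vport RX group.
 * A failure at any step tears down the objects created so far.
 */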
static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
{
	int num_vfs = esw->esw_funcs.num_vfs;
	int total_vports;
	int err;

	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		total_vports = esw->total_vports;
	else
		total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev);

	memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));

	err = esw_create_uplink_offloads_acl_tables(esw);
	if (err)
		return err;

	err = esw_create_offloads_fdb_tables(esw, total_vports);
	if (err)
		goto create_fdb_err;

	err = esw_create_offloads_table(esw, total_vports);
	if (err)
		goto create_ft_err;

	err = esw_create_vport_rx_group(esw, total_vports);
	if (err)
		goto create_fg_err;

	return 0;

create_fg_err:
	esw_destroy_offloads_table(esw);

create_ft_err:
	esw_destroy_offloads_fdb_tables(esw);

create_fdb_err:
	esw_destroy_uplink_offloads_acl_tables(esw);

	return err;
}

static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
{
	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_offloads_fdb_tables(esw);
	esw_destroy_uplink_offloads_acl_tables(esw);
}

static void
esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
{
	bool host_pf_disabled;
	u16 new_num_vfs;

	new_num_vfs = MLX5_GET(query_esw_functions_out, out,
			       host_params_context.host_num_of_vfs);
	host_pf_disabled = MLX5_GET(query_esw_functions_out, out,
				    host_params_context.host_pf_disabled);

	if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
		return;

	/* Number of VFs can only change from "0 to x" or "x to 0". */
	if (esw->esw_funcs.num_vfs > 0) {
		esw_offloads_unload_vf_reps(esw, esw->esw_funcs.num_vfs);
	} else {
		int err;

		err = esw_offloads_load_vf_reps(esw, new_num_vfs);
		if (err)
			return;
	}
	esw->esw_funcs.num_vfs = new_num_vfs;
}

static void esw_functions_changed_event_handler(struct work_struct *work)
{
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;
	const u32 *out;

	host_work = container_of(work, struct mlx5_host_work, work);
	esw = host_work->esw;

	out = mlx5_esw_query_functions(esw->dev);
	if (IS_ERR(out))
		goto out;

	esw_vfs_changed_event_handler(esw, out);
	kvfree(out);
out:
	kfree(host_work);
}

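/* Notifier for ESW_FUNCTIONS_CHANGED events. It may be invoked from
 * atomic context (hence GFP_ATOMIC), so the firmware query and the
 * representor reload are deferred to the eswitch workqueue.
 */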
int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data)
{
	struct mlx5_esw_functions *esw_funcs;
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;

	host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
	if (!host_work)
		return NOTIFY_DONE;

	esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
	esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);

	host_work->esw = esw;

	INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
	queue_work(esw->work_queue, &host_work->work);

	return NOTIFY_OK;
}

int esw_offloads_enable(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	int err, i;

	if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
	    MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
	else
		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;

	mlx5_rdma_enable_roce(esw->dev);
	err = esw_offloads_steering_init(esw);
	if (err)
		goto err_steering_init;

	err = esw_set_passing_vport_metadata(esw, true);
	if (err)
		goto err_vport_metadata;

	/* Representor will control the vport link state */
	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;

	err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
	if (err)
		goto err_vports;

	err = esw_offloads_load_all_reps(esw);
	if (err)
		goto err_reps;

	esw_offloads_devcom_init(esw);
	mutex_init(&esw->offloads.termtbl_mutex);

	return 0;

err_reps:
	mlx5_eswitch_disable_pf_vf_vports(esw);
err_vports:
	esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
	esw_offloads_steering_cleanup(esw);
err_steering_init:
	mlx5_rdma_disable_roce(esw->dev);
	return err;
}

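/* Switch the eswitch from offloads back to legacy mode. If enabling
 * legacy mode fails, try to restore offloads mode so the device is not
 * left with the eswitch disabled.
 */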
static int esw_offloads_stop(struct mlx5_eswitch *esw,
			     struct netlink_ext_ack *extack)
{
	int err, err1;

	mlx5_eswitch_disable(esw, true);
	err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
		err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to offloads");
		}
	}

	return err;
}

void esw_offloads_disable(struct mlx5_eswitch *esw)
{
	esw_offloads_devcom_cleanup(esw);
	esw_offloads_unload_all_reps(esw);
	mlx5_eswitch_disable_pf_vf_vports(esw);
	esw_set_passing_vport_metadata(esw, false);
	esw_offloads_steering_cleanup(esw);
	mlx5_rdma_disable_roce(esw->dev);
	esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}

static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = MLX5_ESWITCH_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = MLX5_ESWITCH_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case MLX5_ESWITCH_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case MLX5_ESWITCH_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
		*mlx5_mode = MLX5_INLINE_MODE_NONE;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
		*mlx5_mode = MLX5_INLINE_MODE_L2;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
		*mlx5_mode = MLX5_INLINE_MODE_IP;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int mlx5_devlink_eswitch_check(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;

	if (!MLX5_ESWITCH_MANAGER(dev))
		return -EPERM;

	if (dev->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
	    !mlx5_core_is_ecpf_esw_manager(dev))
		return -EOPNOTSUPP;

	return 0;
}

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	u16 cur_mlx5_mode, mlx5_mode = 0;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	cur_mlx5_mode = dev->priv.eswitch->mode;

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	if (cur_mlx5_mode == mlx5_mode)
		return 0;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return esw_offloads_start(dev->priv.eswitch, extack);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return esw_offloads_stop(dev->priv.eswitch, extack);
	else
		return -EINVAL;
}

int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}

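/* Apply the requested minimum inline mode to every host-function vport.
 * If any vport fails, revert the vports already modified to the previous
 * inline mode before returning the error.
 */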
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err, vport, num_vport;
	u8 mlx5_mode;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
			return 0;
		/* fall through */
	case MLX5_CAP_INLINE_MODE_L2:
		NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
		return -EOPNOTSUPP;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		break;
	}

	if (atomic64_read(&esw->offloads.num_flows) > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set inline mode when flows are configured");
		return -EOPNOTSUPP;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to set min inline on vport");
			goto revert_inline_mode;
		}
	}

	esw->offloads.inline_mode = mlx5_mode;
	return 0;

revert_inline_mode:
	num_vport = --vport;
	mlx5_esw_for_each_host_func_vport_reverse(esw, vport, num_vport)
		mlx5_modify_nic_vport_min_inline(dev,
						 vport,
						 esw->offloads.inline_mode);
out:
	return err;
}

int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}

int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	int vport;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == MLX5_ESWITCH_NONE)
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}

query_vports:
	mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
	mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
		if (prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}

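/* Changing the encap mode requires recreating the fast-path FDB tables.
 * If recreating them with the new mode fails, restore the previous encap
 * value and recreate the tables with it.
 */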
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
		return -EOPNOTSUPP;

	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
		return -EOPNOTSUPP;

	if (esw->mode == MLX5_ESWITCH_LEGACY) {
		esw->offloads.encap = encap;
		return 0;
	}

	if (esw->offloads.encap == encap)
		return 0;

	if (atomic64_read(&esw->offloads.num_flows) > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set encapsulation when flows are configured");
		return -EOPNOTSUPP;
	}

	esw_destroy_offloads_fdb_tables(esw);

	esw->offloads.encap = encap;

	err = esw_create_offloads_fdb_tables(esw, esw->nvports);

	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed re-creating fast FDB table");
		esw->offloads.encap = !encap;
		(void)esw_create_offloads_fdb_tables(esw, esw->nvports);
	}

	return err;
}

int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
					enum devlink_eswitch_encap_mode *encap)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	*encap = esw->offloads.encap;
	return 0;
}

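/* Register representor ops for the given rep type and mark all reps as
 * REP_REGISTERED. Loaded reps move to REP_LOADED; unregistering moves
 * them back to REP_UNREGISTERED.
 */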
void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
				      const struct mlx5_eswitch_rep_ops *ops,
				      u8 rep_type)
{
	struct mlx5_eswitch_rep_data *rep_data;
	struct mlx5_eswitch_rep *rep;
	int i;

	esw->offloads.rep_ops[rep_type] = ops;
	mlx5_esw_for_all_reps(esw, i, rep) {
		rep_data = &rep->rep_data[rep_type];
		atomic_set(&rep_data->state, REP_REGISTERED);
	}
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);

void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int i;

	if (esw->mode == MLX5_ESWITCH_OFFLOADS)
		__unload_reps_all_vport(esw, rep_type);

	mlx5_esw_for_all_reps(esw, i, rep)
		atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);

void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	return rep->rep_data[rep_type].priv;
}

void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
				 u16 vport,
				 u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, vport);

	if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
	    esw->offloads.rep_ops[rep_type]->get_proto_dev)
		return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
	return NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);

void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
{
	return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
}
EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);

struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
						u16 vport)
{
	return mlx5_eswitch_get_rep(esw, vport);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);

bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
	return vport_num >= MLX5_VPORT_FIRST_VF &&
	       vport_num <= esw->dev->priv.sriov.max_vfs;
}

bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
{
	return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);

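/* Encode the metadata value used for source-vport matching: the owning
 * function's vhca_id in the upper 16 bits and the vport number in the
 * lower 16 bits, e.g. vhca_id 0x1 and vport 2 yield 0x00010002.
 */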
u32 mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw,
					      u16 vport_num)
{
	return ((MLX5_CAP_GEN(esw->dev, vhca_id) & 0xffff) << 16) | vport_num;
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);