/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
#include "lib/eq.h"

/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)

#define fdb_prio_table(esw, chain, prio, level) \
	(esw)->fdb_table.offloads.fdb_prio[(chain)][(prio)][(level)]

#define UPLINK_REP_INDEX 0

static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
						     u16 vport_num)
{
	int idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);

	WARN_ON(idx > esw->total_vports - 1);
	return &esw->offloads.vport_reps[idx];
}

static struct mlx5_flow_table *
esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
static void
esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);

bool mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw)
{
	return (!!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED));
}

u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw)
{
	if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
		return FDB_TC_MAX_CHAIN;

	return 0;
}

u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw)
{
	if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
		return FDB_TC_MAX_PRIO;

	return 1;
}

static bool
esw_check_ingress_prio_tag_enabled(const struct mlx5_eswitch *esw,
				   const struct mlx5_vport *vport)
{
	return (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
		mlx5_eswitch_is_vf_vport(esw, vport->vport));
}

static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_esw_flow_attr *attr)
{
	void *misc2;
	void *misc;

	/* Use metadata matching because vport is not represented by single
	 * VHCA in dual-port RoCE mode, and matching on source vport may fail.
	 */
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(attr->in_mdev->priv.eswitch,
								   attr->in_rep->vport));

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET_TO_ONES(fte_match_set_misc2, misc2, metadata_reg_c_0);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		if (memchr_inv(misc, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc)))
			spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id,
				 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET_TO_ONES(fte_match_set_misc, misc,
					 source_eswitch_owner_vhca_id);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	}

	if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
	    attr->in_rep->vport == MLX5_VPORT_UPLINK)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
}

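/* Add an offloaded flow rule to the FDB. The destination list is built
 * either from a "goto chain" table reference (attr->dest_chain) or from
 * the output vports in attr->dests[], with a flow counter destination
 * appended when counting is requested. VLAN push/pop is emulated here
 * when the firmware cannot apply it natively, and the rule lands in the
 * table for (chain, prio), behind a termination table if one is required.
 */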
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	bool split = !!(attr->split_count);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *fdb;
	int j, i = 0;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	flow_act.action = attr->action;
	/* if per flow vlan pop/push is emulated, don't set that into the firmware */
	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
				     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
		flow_act.vlan[0].vid = attr->vlan_vid[0];
		flow_act.vlan[0].prio = attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
			flow_act.vlan[1].vid = attr->vlan_vid[1];
			flow_act.vlan[1].prio = attr->vlan_prio[1];
		}
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		if (attr->dest_chain) {
			struct mlx5_flow_table *ft;

			ft = esw_get_prio_table(esw, attr->dest_chain, 1, 0);
			if (IS_ERR(ft)) {
				rule = ERR_CAST(ft);
				goto err_create_goto_table;
			}

			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[i].ft = ft;
			i++;
		} else {
			for (j = attr->split_count; j < attr->out_count; j++) {
				dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
				dest[i].vport.num = attr->dests[j].rep->vport;
				dest[i].vport.vhca_id =
					MLX5_CAP_GEN(attr->dests[j].mdev, vhca_id);
				if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
					dest[i].vport.flags |=
						MLX5_FLOW_DEST_VPORT_VHCA_ID;
				if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
					flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
					flow_act.pkt_reformat = attr->dests[j].pkt_reformat;
					dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
					dest[i].vport.pkt_reformat =
						attr->dests[j].pkt_reformat;
				}
				i++;
			}
		}
	}
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter_id = mlx5_fc_id(attr->counter);
		i++;
	}

	mlx5_eswitch_set_rule_source_port(esw, spec, attr);

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	if (attr->inner_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

	fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!split);
	if (IS_ERR(fdb)) {
		rule = ERR_CAST(fdb);
		goto err_esw_get;
	}

	if (mlx5_eswitch_termtbl_required(esw, &flow_act, spec))
		rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr,
						     &flow_act, dest, i);
	else
		rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		atomic64_inc(&esw->offloads.num_flows);

	return rule;

err_add_rule:
	esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
err_esw_get:
	if (attr->dest_chain)
		esw_put_prio_table(esw, attr->dest_chain, 1, 0);
err_create_goto_table:
	return rule;
}

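/* Add the first part of a "split" flow: a rule in the fast FDB (level 0)
 * that fans out to the vports in attr->dests[0..split_count-1] and then
 * forwards to the per-(chain, prio) forwarding table (level 1), where
 * the rest of the flow's actions are applied by a separate rule.
 */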
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *fast_fdb;
	struct mlx5_flow_table *fwd_fdb;
	struct mlx5_flow_handle *rule;
	int i;

	fast_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 0);
	if (IS_ERR(fast_fdb)) {
		rule = ERR_CAST(fast_fdb);
		goto err_get_fast;
	}

	fwd_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 1);
	if (IS_ERR(fwd_fdb)) {
		rule = ERR_CAST(fwd_fdb);
		goto err_get_fwd;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < attr->split_count; i++) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport.num = attr->dests[i].rep->vport;
		dest[i].vport.vhca_id =
			MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
		if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
			dest[i].vport.pkt_reformat = attr->dests[i].pkt_reformat;
		}
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = fwd_fdb;
	i++;

	mlx5_eswitch_set_rule_source_port(esw, spec, attr);

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);

	if (IS_ERR(rule))
		goto add_err;

	atomic64_inc(&esw->offloads.num_flows);

	return rule;
add_err:
	esw_put_prio_table(esw, attr->chain, attr->prio, 1);
err_get_fwd:
	esw_put_prio_table(esw, attr->chain, attr->prio, 0);
err_get_fast:
	return rule;
}

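/* Common teardown for both rule flavors above: delete the rule, release
 * any termination tables it referenced, and drop the references taken on
 * the chain/prio tables when the rule was created.
 */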
static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
			struct mlx5_flow_handle *rule,
			struct mlx5_esw_flow_attr *attr,
			bool fwd_rule)
{
	bool split = (attr->split_count > 0);
	int i;

	mlx5_del_flow_rules(rule);

	/* unref the term table */
	for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
		if (attr->dests[i].termtbl)
			mlx5_eswitch_termtbl_put(esw, attr->dests[i].termtbl);
	}

	atomic64_dec(&esw->offloads.num_flows);

	if (fwd_rule) {
		esw_put_prio_table(esw, attr->chain, attr->prio, 1);
		esw_put_prio_table(esw, attr->chain, attr->prio, 0);
	} else {
		esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
		if (attr->dest_chain)
			esw_put_prio_table(esw, attr->dest_chain, 1, 0);
	}
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, false);
}

void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, true);
}

static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int i, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	mlx5_esw_for_each_host_func_rep(esw, i, rep, esw->esw_funcs.num_vfs) {
		if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

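/* Validate that an emulated vlan push/pop action can be offloaded:
 * push/pop only makes sense on forwarding rules, push is not allowed
 * from the uplink and pop is not allowed towards it, and a vport may
 * only have a single pushed vlan at a time.
 */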
static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push && in_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules without it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules without vlans (attr->vlan = 0) && with vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}

int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* no-op when vlan push/pop is supported natively rather than emulated */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
		 !attr->dest_chain);

	mutex_lock(&esw->state_lock);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		goto unlock;

	attr->vlan_handled = false;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
			vport->vlan_refcount++;
			attr->vlan_handled = true;
		}

		goto unlock;
	}

	if (!push && !pop)
		goto unlock;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan_vid[0];
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->vlan_handled = true;
unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* no-op when vlan push/pop is supported natively rather than emulated */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	if (!attr->vlan_handled)
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	mutex_lock(&esw->state_lock);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
			vport->vlan_refcount--;

		goto out;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		goto out;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	mutex_unlock(&esw->state_lock);
	return err;
}

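/* Add a rule that catches traffic originating from a given send queue
 * (SQ) of the eswitch manager and steers it to the given vport. This is
 * what lets a representor netdev transmit directly to the VF it
 * represents.
 */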
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, u16 vport,
				    u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	/* source vport is the esw manager */
	MLX5_SET(fte_match_set_misc, misc, source_port, esw->manager_vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

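/* Enable or disable propagation of the source-port metadata register
 * (reg_c_0) from the FDB to the vport domain, via a read-modify-write of
 * the esw vport context. Only relevant when source-port matching is done
 * on metadata rather than the vport number.
 */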
static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
	u8 fdb_to_vport_reg_c_id;
	int err;

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
		return 0;

	err = mlx5_eswitch_query_esw_vport_context(esw->dev, 0, false,
						   out, sizeof(out));
	if (err)
		return err;

	fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out,
					 esw_vport_context.fdb_to_vport_reg_c_id);

	if (enable)
		fdb_to_vport_reg_c_id |= MLX5_FDB_TO_VPORT_REG_C_0;
	else
		fdb_to_vport_reg_c_id &= ~MLX5_FDB_TO_VPORT_REG_C_0;

	MLX5_SET(modify_esw_vport_context_in, in,
		 esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id);

	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.fdb_to_vport_reg_c_id, 1);

	return mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false,
						     in, sizeof(in));
}

static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
				  struct mlx5_core_dev *peer_dev,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_destination *dest)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters_2);
		MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

		MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(peer_dev, vhca_id));

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);
	}

	dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest->vport.num = peer_dev->priv.eswitch->manager_vport;
	dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
	dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}

static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
					       struct mlx5_eswitch *peer_esw,
					       struct mlx5_flow_spec *spec,
					       u16 vport)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(peer_esw,
								   vport));
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);
	}
}

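/* In merged-eswitch (dual-port) setups, install one miss rule per vport
 * of the peer device: traffic that entered through the peer's eswitch
 * and matched no offloaded rule is sent back to the peer's eswitch
 * manager vport so its representor can handle it.
 */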
static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
				       struct mlx5_core_dev *peer_dev)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle **flows;
	struct mlx5_flow_handle *flow;
	struct mlx5_flow_spec *spec;
	/* total vports is the same for both e-switches */
	int nvports = esw->total_vports;
	void *misc;
	int err, i;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	peer_miss_rules_setup(esw, peer_dev, spec, &dest);

	flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
	if (!flows) {
		err = -ENOMEM;
		goto alloc_flows_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
						   spec, MLX5_VPORT_PF);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_pf_flow_err;
		}
		flows[MLX5_VPORT_PF] = flow;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_ecpf_flow_err;
		}
		flows[mlx5_eswitch_ecpf_idx(esw)] = flow;
	}

	mlx5_esw_for_each_vf_vport_num(esw, i, mlx5_core_max_vfs(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw,
						   peer_dev->priv.eswitch,
						   spec, i);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_vf_flow_err;
		}
		flows[i] = flow;
	}

	esw->fdb_table.offloads.peer_miss_rules = flows;

	kvfree(spec);
	return 0;

add_vf_flow_err:
	nvports = --i;
	mlx5_esw_for_each_vf_vport_num_reverse(esw, i, nvports)
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
add_ecpf_flow_err:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);
add_pf_flow_err:
	esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
	kvfree(flows);
alloc_flows_err:
	kvfree(spec);
	return err;
}

static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows;
	int i;

	flows = esw->fdb_table.offloads.peer_miss_rules;

	mlx5_esw_for_each_vf_vport_num_reverse(esw, i,
					       mlx5_core_max_vfs(esw->dev))
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);

	kvfree(flows);
}

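/* Install the two match-all miss rules (MLX5_ESW_MISS_FLOWS): one for
 * unicast and one for multicast destination MACs, distinguished by the
 * multicast bit of dmac. Traffic that matched no offloaded rule is sent
 * to the eswitch manager vport, i.e. up to the representors.
 */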
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = esw->manager_vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}

#define ESW_OFFLOADS_NUM_GROUPS  4

/* Firmware currently has 4 pools of 4 sizes that it supports (ESW_POOLS),
 * and a virtual memory region of 16M (ESW_SIZE), this region is duplicated
 * for each flow table pool. We can allocate up to 16M of each pool,
 * and we keep track of how much we used via put/get_sz_to_pool.
 * Firmware doesn't report any of this for now.
 * ESW_POOLS is expected to be sorted from large to small.
 */
#define ESW_SIZE (16 * 1024 * 1024)
const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
				    64 * 1024, 128 };

static int
get_sz_from_pool(struct mlx5_eswitch *esw)
{
	int sz = 0, i;

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
		if (esw->fdb_table.offloads.fdb_left[i]) {
			--esw->fdb_table.offloads.fdb_left[i];
			sz = ESW_POOLS[i];
			break;
		}
	}

	return sz;
}

static void
put_sz_to_pool(struct mlx5_eswitch *esw, int sz)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
		if (sz >= ESW_POOLS[i]) {
			++esw->fdb_table.offloads.fdb_left[i];
			break;
		}
	}
}

static struct mlx5_flow_table *
create_next_size_table(struct mlx5_eswitch *esw,
		       struct mlx5_flow_namespace *ns,
		       u16 table_prio,
		       int level,
		       u32 flags)
{
	struct mlx5_flow_table *fdb;
	int sz;

	sz = get_sz_from_pool(esw);
	if (!sz)
		return ERR_PTR(-ENOSPC);

	fdb = mlx5_create_auto_grouped_flow_table(ns,
						  table_prio,
						  sz,
						  ESW_OFFLOADS_NUM_GROUPS,
						  level,
						  flags);
	if (IS_ERR(fdb)) {
		esw_warn(esw->dev, "Failed to create FDB Table err %d (table prio: %d, level: %d, size: %d)\n",
			 (int)PTR_ERR(fdb), table_prio, level, sz);
		put_sz_to_pool(esw, sz);
	}

	return fdb;
}

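/* Look up (or lazily create) the flow table for a given (chain, prio,
 * level) triplet, taking a reference on the requested level and on all
 * lower levels so fs_core can connect the tables correctly. Chain
 * FDB_TC_SLOW_PATH_CHAIN is special-cased to the pre-created slow path
 * table.
 */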
static struct mlx5_flow_table *
esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *fdb = NULL;
	struct mlx5_flow_namespace *ns;
	int table_prio, l = 0;
	u32 flags = 0;

	if (chain == FDB_TC_SLOW_PATH_CHAIN)
		return esw->fdb_table.offloads.slow_fdb;

	mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);

	fdb = fdb_prio_table(esw, chain, prio, level).fdb;
	if (fdb) {
		/* take ref on earlier levels as well */
		while (level >= 0)
			fdb_prio_table(esw, chain, prio, level--).num_rules++;
		mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
		return fdb;
	}

	ns = mlx5_get_fdb_sub_ns(dev, chain);
	if (!ns) {
		esw_warn(dev, "Failed to get FDB sub namespace\n");
		mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	table_prio = prio - 1;

	/* create earlier levels for correct fs_core lookup when
	 * connecting tables
	 */
	for (l = 0; l <= level; l++) {
		if (fdb_prio_table(esw, chain, prio, l).fdb) {
			fdb_prio_table(esw, chain, prio, l).num_rules++;
			continue;
		}

		fdb = create_next_size_table(esw, ns, table_prio, l, flags);
		if (IS_ERR(fdb)) {
			l--;
			goto err_create_fdb;
		}

		fdb_prio_table(esw, chain, prio, l).fdb = fdb;
		fdb_prio_table(esw, chain, prio, l).num_rules = 1;
	}

	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
	return fdb;

err_create_fdb:
	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
	if (l >= 0)
		esw_put_prio_table(esw, chain, prio, l);

	return fdb;
}

static void
esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
{
	int l;

	if (chain == FDB_TC_SLOW_PATH_CHAIN)
		return;

	mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);

	for (l = level; l >= 0; l--) {
		if (--(fdb_prio_table(esw, chain, prio, l).num_rules) > 0)
			continue;

		put_sz_to_pool(esw, fdb_prio_table(esw, chain, prio, l).fdb->max_fte);
		mlx5_destroy_flow_table(fdb_prio_table(esw, chain, prio, l).fdb);
		fdb_prio_table(esw, chain, prio, l).fdb = NULL;
	}

	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
}

static void esw_destroy_offloads_fast_fdb_tables(struct mlx5_eswitch *esw)
{
	/* If lazy creation isn't supported, deref the fast path tables */
	if (!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)) {
		esw_put_prio_table(esw, 0, 1, 1);
		esw_put_prio_table(esw, 0, 1, 0);
	}
}

#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
					   u32 *flow_group_in)
{
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    flow_group_in,
					    match_criteria);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS_2);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters_2.metadata_reg_c_0);
	} else {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_port);
	}
}

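/* Create the slow path FDB table and its flow groups: send-to-vport
 * rules, peer eswitch miss rules, and the unicast/multicast miss group.
 * When the firmware supports multi_fdb_encap, the fast path (chains and
 * priorities) tables are created lazily on demand; otherwise a single
 * fast path table pair is opened here.
 */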
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	u32 *flow_group_in, max_flow_counter;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int table_size, ix, err = 0, i;
	struct mlx5_flow_group *g;
	u32 flags = 0, fdb_max;
	void *match_criteria;
	u8 *dmac;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}
	esw->fdb_table.offloads.ns = root_ns;
	err = mlx5_flow_namespace_set_mode(root_ns,
					   esw->dev->priv.steering->mode);
	if (err) {
		esw_warn(dev, "Failed to set FDB namespace steering mode\n");
		goto ns_err;
	}

	max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
			    MLX5_CAP_GEN(dev, max_flow_counter_15_0);
	fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);

	esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(%d))\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
		  max_flow_counter, ESW_OFFLOADS_NUM_GROUPS,
		  fdb_max);

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++)
		esw->fdb_table.offloads.fdb_left[i] =
			ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;

	table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ +
		MLX5_ESW_MISS_FLOWS + esw->total_vports;

	/* create the slow path fdb with encap set, so further table instances
	 * can be created at run time while VFs are probed if the FW allows that.
	 */
	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	ft_attr.flags = flags;
	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

	/* If lazy creation isn't supported, open the fast path tables now */
	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, multi_fdb_encap) &&
	    esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
		esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Lazy creation of flow tables isn't supported, ignoring priorities\n");
		esw_get_prio_table(esw, 0, 1, 0);
		esw_get_prio_table(esw, 0, 1, 1);
	} else {
		esw_debug(dev, "Lazy creation of flow tables supported, deferring table opening\n");
		esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
	}

	/* create send-to-vport group */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create peer esw miss group */
	memset(flow_group_in, 0, inlen);

	esw_set_flow_group_source_port(esw, flow_group_in);

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					      flow_group_in,
					      match_criteria);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_eswitch_owner_vhca_id);

		MLX5_SET(create_flow_group_in, flow_group_in,
			 source_eswitch_owner_vhca_id_valid, 1);
	}

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + esw->total_vports - 1);
	ix += esw->total_vports;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
		goto peer_miss_err;
	}
	esw->fdb_table.offloads.peer_miss_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + MLX5_ESW_MISS_FLOWS);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	esw->nvports = nvports;
	kvfree(flow_group_in);
	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	esw_destroy_offloads_fast_fdb_tables(esw);
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS);
ns_err:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.offloads.slow_fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
	esw_destroy_offloads_fast_fdb_tables(esw);
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
				     MLX5_FLOW_STEERING_MODE_DMFS);
}

static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = nvports + MLX5_ESW_MISS_FLOWS;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}

static int esw_create_vport_rx_group(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int err = 0;

	nvports = nvports + MLX5_ESW_MISS_FLOWS;
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	esw_set_flow_group_source_port(esw, flow_group_in);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

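/* Steer traffic that reaches the offloads table from a given vport to a
 * caller-supplied destination, typically the RX queues of that vport's
 * representor. The source vport is matched either by metadata (reg_c_0)
 * or by the source port field, mirroring the group created above.
 */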
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(esw, vport));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}

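/* devlink entry point for switching from legacy to offloads mode: tear
 * down the legacy eswitch, re-enable it in offloads mode, and fall back
 * to legacy if that fails. Also resolves the effective inline mode when
 * none was requested explicitly.
 */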
static int esw_offloads_start(struct mlx5_eswitch *esw,
			      struct netlink_ext_ack *extack)
{
	int err, err1;

	if (esw->mode != MLX5_ESWITCH_LEGACY &&
	    !mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set offloads mode, SRIOV legacy not enabled");
		return -EINVAL;
	}

	mlx5_eswitch_disable(esw, false);
	mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs);
	err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed setting eswitch to offloads");
		err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to legacy");
		}
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			NL_SET_ERR_MSG_MOD(extack,
					   "Inline mode is different between vports");
		}
	}
	return err;
}

void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	kfree(esw->offloads.vport_reps);
}

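/* Allocate the representor array, one entry per eswitch vport, and mark
 * every rep type as REP_UNREGISTERED until a driver (e.g. the eth or IB
 * rep) registers its ops.
 */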
int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	int total_vports = esw->total_vports;
	struct mlx5_eswitch_rep *rep;
	int vport_index;
	u8 rep_type;

	esw->offloads.vport_reps = kcalloc(total_vports,
					   sizeof(struct mlx5_eswitch_rep),
					   GFP_KERNEL);
	if (!esw->offloads.vport_reps)
		return -ENOMEM;

	mlx5_esw_for_all_reps(esw, vport_index, rep) {
		rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport_index);
		rep->vport_index = vport_index;

		for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
			atomic_set(&rep->rep_data[rep_type].state,
				   REP_UNREGISTERED);
	}

	return 0;
}

static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
				      struct mlx5_eswitch_rep *rep, u8 rep_type)
{
	if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
			   REP_LOADED, REP_REGISTERED) == REP_LOADED)
		esw->offloads.rep_ops[rep_type]->unload(rep);
}

static void __unload_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	__esw_offloads_unload_rep(esw, rep, rep_type);
}

static void __unload_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
				   u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int i;

	mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvports)
		__esw_offloads_unload_rep(esw, rep, rep_type);
}

static void esw_offloads_unload_vf_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = NUM_REP_TYPES;

	while (rep_type-- > 0)
		__unload_reps_vf_vport(esw, nvports, rep_type);
}

static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	__unload_reps_vf_vport(esw, esw->esw_funcs.num_vfs, rep_type);

	/* Special vports must be the last to unload. */
	__unload_reps_special_vport(esw, rep_type);
}

static void esw_offloads_unload_all_reps(struct mlx5_eswitch *esw)
{
	u8 rep_type = NUM_REP_TYPES;

	while (rep_type-- > 0)
		__unload_reps_all_vport(esw, rep_type);
}

static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
				   struct mlx5_eswitch_rep *rep, u8 rep_type)
{
	int err = 0;

	if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
			   REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
		err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
		if (err)
			atomic_set(&rep->rep_data[rep_type].state,
				   REP_REGISTERED);
	}

	return err;
}

static int __load_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int err;

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	err = __esw_offloads_load_rep(esw, rep, rep_type);
	if (err)
		return err;

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		err = __esw_offloads_load_rep(esw, rep, rep_type);
		if (err)
			goto err_pf;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
		err = __esw_offloads_load_rep(esw, rep, rep_type);
		if (err)
			goto err_ecpf;
	}

	return 0;

err_ecpf:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

err_pf:
	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	__esw_offloads_unload_rep(esw, rep, rep_type);
	return err;
}

static int __load_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
				u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int err, i;

	mlx5_esw_for_each_vf_rep(esw, i, rep, nvports) {
		err = __esw_offloads_load_rep(esw, rep, rep_type);
		if (err)
			goto err_vf;
	}

	return 0;

err_vf:
	__unload_reps_vf_vport(esw, --i, rep_type);
	return err;
}

static int __load_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	int err;

	/* Special vports must be loaded first, uplink rep creates mdev resource. */
	err = __load_reps_special_vport(esw, rep_type);
	if (err)
		return err;

	err = __load_reps_vf_vport(esw, esw->esw_funcs.num_vfs, rep_type);
	if (err)
		goto err_vfs;

	return 0;

err_vfs:
	__unload_reps_special_vport(esw, rep_type);
	return err;
}

static int esw_offloads_load_vf_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = 0;
	int err;

	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
		err = __load_reps_vf_vport(esw, nvports, rep_type);
		if (err)
			goto err_reps;
	}

	return err;

err_reps:
	while (rep_type-- > 0)
		__unload_reps_vf_vport(esw, nvports, rep_type);
	return err;
}

static int esw_offloads_load_all_reps(struct mlx5_eswitch *esw)
{
	u8 rep_type = 0;
	int err;

	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
		err = __load_reps_all_vport(esw, rep_type);
		if (err)
			goto err_reps;
	}

	return err;

err_reps:
	while (rep_type-- > 0)
		__unload_reps_all_vport(esw, rep_type);
	return err;
}

#define ESW_OFFLOADS_DEVCOM_PAIR	(0)
#define ESW_OFFLOADS_DEVCOM_UNPAIR	(1)

static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
				  struct mlx5_eswitch *peer_esw)
{
	int err;

	err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
	if (err)
		return err;

	return 0;
}

static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
{
	mlx5e_tc_clean_fdb_peer_flows(esw);
	esw_del_fdb_peer_miss_rules(esw);
}

static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
					 struct mlx5_eswitch *peer_esw,
					 bool pair)
{
	struct mlx5_flow_root_namespace *peer_ns;
	struct mlx5_flow_root_namespace *ns;
	int err;

	peer_ns = peer_esw->dev->priv.steering->fdb_root_ns;
	ns = esw->dev->priv.steering->fdb_root_ns;

	if (pair) {
		err = mlx5_flow_namespace_set_peer(ns, peer_ns);
		if (err)
			return err;

		err = mlx5_flow_namespace_set_peer(peer_ns, ns);
		if (err) {
			mlx5_flow_namespace_set_peer(ns, NULL);
			return err;
		}
	} else {
		mlx5_flow_namespace_set_peer(ns, NULL);
		mlx5_flow_namespace_set_peer(peer_ns, NULL);
	}

	return 0;
}

ac004b83
RD
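/* Pair or unpair two eswitches over devcom. Pairing installs peer miss
 * rules and peers the FDB namespaces in both directions; it is skipped
 * when the two eswitches disagree on vport match metadata support.
 */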
1674static int mlx5_esw_offloads_devcom_event(int event,
1675 void *my_data,
1676 void *event_data)
1677{
1678 struct mlx5_eswitch *esw = my_data;
ac004b83 1679 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
8463daf1 1680 struct mlx5_eswitch *peer_esw = event_data;
ac004b83
RD
1681 int err;
1682
1683 switch (event) {
1684 case ESW_OFFLOADS_DEVCOM_PAIR:
a5641cb5
JL
1685 if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
1686 mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
1687 break;
1688
8463daf1 1689 err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
ac004b83
RD
1690 if (err)
1691 goto err_out;
8463daf1
MG
1692 err = mlx5_esw_offloads_pair(esw, peer_esw);
1693 if (err)
1694 goto err_peer;
ac004b83
RD
1695
1696 err = mlx5_esw_offloads_pair(peer_esw, esw);
1697 if (err)
1698 goto err_pair;
1699
1700 mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
1701 break;
1702
1703 case ESW_OFFLOADS_DEVCOM_UNPAIR:
1704 if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
1705 break;
1706
1707 mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
1708 mlx5_esw_offloads_unpair(peer_esw);
1709 mlx5_esw_offloads_unpair(esw);
8463daf1 1710 mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
ac004b83
RD
1711 break;
1712 }
1713
1714 return 0;
1715
1716err_pair:
1717 mlx5_esw_offloads_unpair(esw);
8463daf1
MG
1718err_peer:
1719 mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
ac004b83
RD
1720err_out:
1721 mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
1722 event, err);
1723 return err;
1724}
1725
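/* Register for devcom pairing events and announce this eswitch to its
 * peer. Only relevant when the device supports a merged eswitch.
 */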
1726static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
1727{
1728 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
1729
04de7dda
RD
1730 INIT_LIST_HEAD(&esw->offloads.peer_flows);
1731 mutex_init(&esw->offloads.peer_mutex);
1732
ac004b83
RD
1733 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
1734 return;
1735
1736 mlx5_devcom_register_component(devcom,
1737 MLX5_DEVCOM_ESW_OFFLOADS,
1738 mlx5_esw_offloads_devcom_event,
1739 esw);
1740
1741 mlx5_devcom_send_event(devcom,
1742 MLX5_DEVCOM_ESW_OFFLOADS,
1743 ESW_OFFLOADS_DEVCOM_PAIR, esw);
1744}
1745
1746static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
1747{
1748 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
1749
1750 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
1751 return;
1752
1753 mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
1754 ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
1755
1756 mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
1757}
1758
18486737
EB
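/* Install the ingress rule that pushes a prio tag VLAN (vid 0) onto
 * untagged packets, chaining the metadata modify header when present.
 */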
1759static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
1760 struct mlx5_vport *vport)
1761{
18486737
EB
1762 struct mlx5_flow_act flow_act = {0};
1763 struct mlx5_flow_spec *spec;
1764 int err = 0;
1765
1766 /* For prio tag mode, there is only 1 FTE:
7445cfb1
JL
1767 * 1) Untagged packets - push prio tag VLAN and modify metadata if
1768 * required, allow
18486737
EB
1769 * Unmatched traffic is allowed by default
1770 */
18486737 1771 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
b7826076
PP
1772 if (!spec)
1773 return -ENOMEM;
18486737
EB
1774
1775 /* Untagged packets - push prio tag VLAN, allow */
1776 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
1777 MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 0);
1778 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1779 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
1780 MLX5_FLOW_CONTEXT_ACTION_ALLOW;
1781 flow_act.vlan[0].ethtype = ETH_P_8021Q;
1782 flow_act.vlan[0].vid = 0;
1783 flow_act.vlan[0].prio = 0;
7445cfb1 1784
d68316b5 1785 if (vport->ingress.offloads.modify_metadata_rule) {
7445cfb1 1786 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
d68316b5 1787 flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
7445cfb1
JL
1788 }
1789
18486737
EB
1790 vport->ingress.allow_rule =
1791 mlx5_add_flow_rules(vport->ingress.acl, spec,
1792 &flow_act, NULL, 0);
1793 if (IS_ERR(vport->ingress.allow_rule)) {
1794 err = PTR_ERR(vport->ingress.allow_rule);
1795 esw_warn(esw->dev,
1796 "vport[%d] configure ingress untagged allow rule, err(%d)\n",
1797 vport->vport, err);
1798 vport->ingress.allow_rule = NULL;
18486737
EB
1799 }
1800
18486737 1801 kvfree(spec);
18486737
EB
1802 return err;
1803}
1804
7445cfb1
JL
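/* Allocate a modify header that writes this vport's match metadata to
 * REG_C_0 and install a match-all ALLOW rule that applies it on ingress.
 */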
1805static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
1806 struct mlx5_vport *vport)
1807{
1808 u8 action[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {};
9446d17e 1809 static const struct mlx5_flow_spec spec = {};
7445cfb1 1810 struct mlx5_flow_act flow_act = {};
7445cfb1
JL
1811 int err = 0;
1812
1813 MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
1814 MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
1815 MLX5_SET(set_action_in, action, data,
1816 mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport));
1817
d68316b5 1818 vport->ingress.offloads.modify_metadata =
2b688ea5
MG
1819 mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
1820 1, action);
d68316b5
PP
1821 if (IS_ERR(vport->ingress.offloads.modify_metadata)) {
1822 err = PTR_ERR(vport->ingress.offloads.modify_metadata);
7445cfb1
JL
1823 esw_warn(esw->dev,
1824 "failed to alloc modify header for vport %d ingress acl (%d)\n",
1825 vport->vport, err);
1826 return err;
1827 }
1828
1829 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
d68316b5
PP
1830 flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
1831 vport->ingress.offloads.modify_metadata_rule =
1832 mlx5_add_flow_rules(vport->ingress.acl,
1833 &spec, &flow_act, NULL, 0);
1834 if (IS_ERR(vport->ingress.offloads.modify_metadata_rule)) {
1835 err = PTR_ERR(vport->ingress.offloads.modify_metadata_rule);
7445cfb1
JL
1836 esw_warn(esw->dev,
1837 "failed to add setting metadata rule for vport %d ingress acl, err(%d)\n",
1838 vport->vport, err);
b7826076 1839 mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
d68316b5 1840 vport->ingress.offloads.modify_metadata_rule = NULL;
7445cfb1 1841 }
7445cfb1
JL
1842 return err;
1843}
1844
a962d7a6
PP
1845static void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
1846 struct mlx5_vport *vport)
7445cfb1 1847{
d68316b5
PP
1848 if (vport->ingress.offloads.modify_metadata_rule) {
1849 mlx5_del_flow_rules(vport->ingress.offloads.modify_metadata_rule);
1850 mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
7445cfb1 1851
d68316b5 1852 vport->ingress.offloads.modify_metadata_rule = NULL;
7445cfb1
JL
1853 }
1854}
1855
10652f39
PP
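/* Create up to two ingress ACL flow groups: one matching on cvlan_tag
 * for the prio tag FTE, and one match-all group for the metadata FTE.
 */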
1856static int esw_vport_create_ingress_acl_group(struct mlx5_eswitch *esw,
1857 struct mlx5_vport *vport)
18486737 1858{
10652f39
PP
1859 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1860 struct mlx5_flow_group *g;
b7826076 1861 void *match_criteria;
10652f39 1862 u32 *flow_group_in;
b7826076 1863 u32 flow_index = 0;
10652f39 1864 int ret = 0;
18486737 1865
10652f39
PP
1866 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
1867 if (!flow_group_in)
1868 return -ENOMEM;
18486737 1869
b7826076
PP
1870 if (esw_check_ingress_prio_tag_enabled(esw, vport)) {
1871 /* This group holds the FTE that matches untagged packets when
1872 * prio_tag is enabled.
1873 */
1874 memset(flow_group_in, 0, inlen);
18486737 1875
b7826076
PP
1876 match_criteria = MLX5_ADDR_OF(create_flow_group_in,
1877 flow_group_in, match_criteria);
1878 MLX5_SET(create_flow_group_in, flow_group_in,
1879 match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1880 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
1881 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
1882 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
1883
1884 g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
1885 if (IS_ERR(g)) {
1886 ret = PTR_ERR(g);
1887 esw_warn(esw->dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
1888 vport->vport, ret);
1889 goto prio_tag_err;
1890 }
1891 vport->ingress.offloads.metadata_prio_tag_grp = g;
1892 flow_index++;
1893 }
1894
1895 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
1896 /* This group holds a match-all FTE that adds metadata: it covers
1897 * tagged packets when prio-tag is enabled (as a fallthrough), or
1898 * all traffic when prio-tag is disabled.
1899 */
1900 memset(flow_group_in, 0, inlen);
1901 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
1902 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
1903
1904 g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
1905 if (IS_ERR(g)) {
1906 ret = PTR_ERR(g);
1907 esw_warn(esw->dev, "vport[%d] ingress create metadata flow group, err(%d)\n",
1908 vport->vport, ret);
1909 goto metadata_err;
1910 }
1911 vport->ingress.offloads.metadata_allmatch_grp = g;
1912 }
1913
1914 kvfree(flow_group_in);
1915 return 0;
1916
1917metadata_err:
1918 if (!IS_ERR_OR_NULL(vport->ingress.offloads.metadata_prio_tag_grp)) {
1919 mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
1920 vport->ingress.offloads.metadata_prio_tag_grp = NULL;
18486737 1921 }
b7826076 1922prio_tag_err:
10652f39
PP
1923 kvfree(flow_group_in);
1924 return ret;
1925}
18486737 1926
10652f39
PP
1927static void esw_vport_destroy_ingress_acl_group(struct mlx5_vport *vport)
1928{
b7826076
PP
1929 if (vport->ingress.offloads.metadata_allmatch_grp) {
1930 mlx5_destroy_flow_group(vport->ingress.offloads.metadata_allmatch_grp);
1931 vport->ingress.offloads.metadata_allmatch_grp = NULL;
1932 }
1933
1934 if (vport->ingress.offloads.metadata_prio_tag_grp) {
1935 mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
1936 vport->ingress.offloads.metadata_prio_tag_grp = NULL;
10652f39 1937 }
18486737
EB
1938}
1939
b1a3380a
VP
1940static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
1941 struct mlx5_vport *vport)
18486737 1942{
b7826076 1943 int num_ftes = 0;
18486737
EB
1944 int err;
1945
7445cfb1 1946 if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
b7826076 1947 !esw_check_ingress_prio_tag_enabled(esw, vport))
7445cfb1
JL
1948 return 0;
1949
1950 esw_vport_cleanup_ingress_rules(esw, vport);
b7826076
PP
1951
1952 if (mlx5_eswitch_vport_match_metadata_enabled(esw))
1953 num_ftes++;
1954 if (esw_check_ingress_prio_tag_enabled(esw, vport))
1955 num_ftes++;
1956
1957 err = esw_vport_create_ingress_acl_table(esw, vport, num_ftes);
7445cfb1
JL
1958 if (err) {
1959 esw_warn(esw->dev,
1960 "failed to enable ingress acl (%d) on vport[%d]\n",
1961 err, vport->vport);
1962 return err;
1963 }
1964
10652f39
PP
1965 err = esw_vport_create_ingress_acl_group(esw, vport);
1966 if (err)
1967 goto group_err;
1968
7445cfb1
JL
1969 esw_debug(esw->dev,
1970 "vport[%d] configure ingress rules\n", vport->vport);
1971
1972 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
1973 err = esw_vport_add_ingress_acl_modify_metadata(esw, vport);
1974 if (err)
10652f39 1975 goto metadata_err;
7445cfb1
JL
1976 }
1977
b7826076 1978 if (esw_check_ingress_prio_tag_enabled(esw, vport)) {
786ef904 1979 err = esw_vport_ingress_prio_tag_config(esw, vport);
18486737 1980 if (err)
10652f39 1981 goto prio_tag_err;
7445cfb1 1982 }
10652f39 1983 return 0;
7445cfb1 1984
10652f39
PP
1985prio_tag_err:
1986 esw_vport_del_ingress_acl_modify_metadata(esw, vport);
1987metadata_err:
10652f39
PP
1988 esw_vport_destroy_ingress_acl_group(vport);
1989group_err:
1990 esw_vport_destroy_ingress_acl_table(vport);
7445cfb1
JL
1991 return err;
1992}
1993
6d94e610
VP
1994static int esw_vport_egress_config(struct mlx5_eswitch *esw,
1995 struct mlx5_vport *vport)
1996{
1997 int err;
1998
1999 if (!MLX5_CAP_GEN(esw->dev, prio_tag_required))
2000 return 0;
2001
2002 esw_vport_cleanup_egress_rules(esw, vport);
2003
2004 err = esw_vport_enable_egress_acl(esw, vport);
2005 if (err)
2006 return err;
2007
fdde49e0
PP
2008 /* For prio tag mode, there is only 1 FTE:
2009 * 1) prio tag packets - pop the prio tag VLAN, allow
2010 * Unmatched traffic is allowed by default
2011 */
2012 esw_debug(esw->dev,
2013 "vport[%d] configure prio tag egress rules\n", vport->vport);
6d94e610 2014
fdde49e0
PP
2015 /* prio tag VLAN rule - pop it so the VF receives untagged packets */
2016 err = mlx5_esw_create_vport_egress_acl_vlan(esw, vport, 0,
2017 MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
2018 MLX5_FLOW_CONTEXT_ACTION_ALLOW);
7445cfb1 2019 if (err)
6d94e610
VP
2020 esw_vport_disable_egress_acl(esw, vport);
2021
7445cfb1
JL
2022 return err;
2023}
2024
92ab1eb3
JL
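/* Vport match metadata requires uplink ingress ACLs, copying REG_C_0
 * from the FDB to the vport, and flow source matching; it is not used
 * when an ECPF is present.
 */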
2025static bool
2026esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
2027{
2028 if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
2029 return false;
2030
2031 if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
2032 MLX5_FDB_TO_VPORT_REG_C_0))
2033 return false;
2034
2035 if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
2036 return false;
2037
2038 if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
2039 mlx5_ecpf_vport_exists(esw->dev))
2040 return false;
2041
2042 return true;
2043}
2044
748da30b 2045int
89a0f1fb
PP
2046esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
2047 struct mlx5_vport *vport)
7445cfb1 2048{
7445cfb1
JL
2049 int err;
2050
89a0f1fb
PP
2051 err = esw_vport_ingress_config(esw, vport);
2052 if (err)
2053 return err;
7445cfb1 2054
89a0f1fb
PP
2055 if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
2056 err = esw_vport_egress_config(esw, vport);
a962d7a6 2057 if (err) {
10652f39 2058 esw_vport_cleanup_ingress_rules(esw, vport);
b7826076
PP
2059 esw_vport_del_ingress_acl_modify_metadata(esw, vport);
2060 esw_vport_destroy_ingress_acl_group(vport);
10652f39 2061 esw_vport_destroy_ingress_acl_table(vport);
7445cfb1 2062 }
18486737 2063 }
89a0f1fb
PP
2064 return err;
2065}
18486737 2066
748da30b 2067void
89a0f1fb
PP
2068esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
2069 struct mlx5_vport *vport)
2070{
2071 esw_vport_disable_egress_acl(esw, vport);
10652f39 2072 esw_vport_cleanup_ingress_rules(esw, vport);
b7826076 2073 esw_vport_del_ingress_acl_modify_metadata(esw, vport);
10652f39
PP
2074 esw_vport_destroy_ingress_acl_group(vport);
2075 esw_vport_destroy_ingress_acl_table(vport);
89a0f1fb 2076}
7445cfb1 2077
748da30b 2078static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
7445cfb1
JL
2079{
2080 struct mlx5_vport *vport;
7445cfb1 2081 int err;
18486737 2082
92ab1eb3
JL
2083 if (esw_check_vport_match_metadata_supported(esw))
2084 esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
18486737 2085
748da30b
VP
2086 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
2087 err = esw_vport_create_offloads_acl_tables(esw, vport);
2088 if (err)
2089 esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
18486737
EB
2090 return err;
2091}
2092
748da30b 2093static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
18486737 2094{
786ef904 2095 struct mlx5_vport *vport;
7445cfb1 2096
748da30b
VP
2097 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
2098 esw_vport_destroy_offloads_acl_tables(esw, vport);
7445cfb1 2099 esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
18486737
EB
2100}
2101
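/* Create the offloads steering objects: the uplink ACL tables, the
 * fast path FDB tables, the offloads table and the vport RX group.
 */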
062f4bf4 2102static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
6ed1803a 2103{
062f4bf4
BW
2104 int num_vfs = esw->esw_funcs.num_vfs;
2105 int total_vports;
6ed1803a
MB
2106 int err;
2107
062f4bf4
BW
2108 if (mlx5_core_is_ecpf_esw_manager(esw->dev))
2109 total_vports = esw->total_vports;
2110 else
2111 total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev);
2112
5c1d260e 2113 memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
e52c2802
PB
2114 mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
2115
748da30b 2116 err = esw_create_uplink_offloads_acl_tables(esw);
7445cfb1
JL
2117 if (err)
2118 return err;
18486737 2119
062f4bf4 2120 err = esw_create_offloads_fdb_tables(esw, total_vports);
c930a3ad 2121 if (err)
7445cfb1 2122 goto create_fdb_err;
c930a3ad 2123
062f4bf4 2124 err = esw_create_offloads_table(esw, total_vports);
c930a3ad
OG
2125 if (err)
2126 goto create_ft_err;
2127
062f4bf4 2128 err = esw_create_vport_rx_group(esw, total_vports);
c930a3ad
OG
2129 if (err)
2130 goto create_fg_err;
2131
2132 return 0;
2133
2134create_fg_err:
2135 esw_destroy_offloads_table(esw);
2136
2137create_ft_err:
1967ce6e 2138 esw_destroy_offloads_fdb_tables(esw);
5bae8c03 2139
7445cfb1 2140create_fdb_err:
748da30b 2141 esw_destroy_uplink_offloads_acl_tables(esw);
7445cfb1 2142
c930a3ad
OG
2143 return err;
2144}
2145
eca8cc38
BW
2146static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
2147{
2148 esw_destroy_vport_rx_group(esw);
2149 esw_destroy_offloads_table(esw);
2150 esw_destroy_offloads_fdb_tables(esw);
748da30b 2151 esw_destroy_uplink_offloads_acl_tables(esw);
eca8cc38
BW
2152}
2153
7e736f9a
PP
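/* Handle a change in the number of host VFs reported by firmware by
 * loading or unloading all VF reps; the count only toggles 0 <-> x.
 */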
2154static void
2155esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
a3888f33 2156{
5ccf2770 2157 bool host_pf_disabled;
7e736f9a 2158 u16 new_num_vfs;
a3888f33 2159
7e736f9a
PP
2160 new_num_vfs = MLX5_GET(query_esw_functions_out, out,
2161 host_params_context.host_num_of_vfs);
5ccf2770
BW
2162 host_pf_disabled = MLX5_GET(query_esw_functions_out, out,
2163 host_params_context.host_pf_disabled);
a3888f33 2164
7e736f9a
PP
2165 if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
2166 return;
a3888f33
BW
2167
2168 /* Number of VFs can only change from "0 to x" or "x to 0". */
cd56f929
VP
2169 if (esw->esw_funcs.num_vfs > 0) {
2170 esw_offloads_unload_vf_reps(esw, esw->esw_funcs.num_vfs);
a3888f33 2171 } else {
7e736f9a 2172 int err;
a3888f33 2173
7e736f9a 2174 err = esw_offloads_load_vf_reps(esw, new_num_vfs);
a3888f33 2175 if (err)
7e736f9a 2176 return;
a3888f33 2177 }
7e736f9a 2178 esw->esw_funcs.num_vfs = new_num_vfs;
a3888f33
BW
2179}
2180
7e736f9a 2181static void esw_functions_changed_event_handler(struct work_struct *work)
ac35dcd6 2182{
7e736f9a
PP
2183 struct mlx5_host_work *host_work;
2184 struct mlx5_eswitch *esw;
dd28087c 2185 const u32 *out;
ac35dcd6 2186
7e736f9a
PP
2187 host_work = container_of(work, struct mlx5_host_work, work);
2188 esw = host_work->esw;
a3888f33 2189
dd28087c
PP
2190 out = mlx5_esw_query_functions(esw->dev);
2191 if (IS_ERR(out))
7e736f9a 2192 goto out;
a3888f33 2193
7e736f9a 2194 esw_vfs_changed_event_handler(esw, out);
dd28087c 2195 kvfree(out);
a3888f33 2196out:
ac35dcd6
VP
2197 kfree(host_work);
2198}
2199
16fff98a 2200int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data)
a3888f33 2201{
cd56f929 2202 struct mlx5_esw_functions *esw_funcs;
a3888f33 2203 struct mlx5_host_work *host_work;
a3888f33
BW
2204 struct mlx5_eswitch *esw;
2205
2206 host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
2207 if (!host_work)
2208 return NOTIFY_DONE;
2209
cd56f929
VP
2210 esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
2211 esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);
a3888f33
BW
2212
2213 host_work->esw = esw;
2214
062f4bf4 2215 INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
a3888f33
BW
2216 queue_work(esw->work_queue, &host_work->work);
2217
2218 return NOTIFY_OK;
2219}
2220
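/* Bring up offloads mode: pick the encap default from FW caps, enable
 * RoCE, create the steering objects, enable passing vport metadata,
 * enable the PF/VF vports, load all reps and start devcom pairing.
 */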
5896b972 2221int esw_offloads_enable(struct mlx5_eswitch *esw)
eca8cc38
BW
2222{
2223 int err;
2224
9a64144d
MG
2225 if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
2226 MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
2227 esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
2228 else
2229 esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
2230
8463daf1 2231 mlx5_rdma_enable_roce(esw->dev);
062f4bf4 2232 err = esw_offloads_steering_init(esw);
eca8cc38 2233 if (err)
8463daf1 2234 goto err_steering_init;
eca8cc38 2235
332bd3a5
PP
2236 err = esw_set_passing_vport_metadata(esw, true);
2237 if (err)
2238 goto err_vport_metadata;
c1286050 2239
925a6acc
PP
2240 err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
2241 if (err)
2242 goto err_vports;
c1286050 2243
062f4bf4 2244 err = esw_offloads_load_all_reps(esw);
eca8cc38
BW
2245 if (err)
2246 goto err_reps;
2247
2248 esw_offloads_devcom_init(esw);
10caabda 2249 mutex_init(&esw->offloads.termtbl_mutex);
a3888f33 2250
eca8cc38
BW
2251 return 0;
2252
2253err_reps:
5896b972 2254 mlx5_eswitch_disable_pf_vf_vports(esw);
925a6acc 2255err_vports:
332bd3a5 2256 esw_set_passing_vport_metadata(esw, false);
c1286050 2257err_vport_metadata:
eca8cc38 2258 esw_offloads_steering_cleanup(esw);
8463daf1
MG
2259err_steering_init:
2260 mlx5_rdma_disable_roce(esw->dev);
eca8cc38
BW
2261 return err;
2262}
2263
db7ff19e
EB
2264static int esw_offloads_stop(struct mlx5_eswitch *esw,
2265 struct netlink_ext_ack *extack)
c930a3ad 2266{
062f4bf4 2267 int err, err1;
c930a3ad 2268
556b9d16 2269 mlx5_eswitch_disable(esw, false);
062f4bf4 2270 err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
6c419ba8 2271 if (err) {
8c98ee77 2272 NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
062f4bf4 2273 err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
8c98ee77
EB
2274 if (err1) {
2275 NL_SET_ERR_MSG_MOD(extack,
2276 "Failed setting eswitch back to offloads");
2277 }
6c419ba8 2278 }
c930a3ad
OG
2279
2280 return err;
2281}
2282
5896b972 2283void esw_offloads_disable(struct mlx5_eswitch *esw)
c930a3ad 2284{
ac004b83 2285 esw_offloads_devcom_cleanup(esw);
062f4bf4 2286 esw_offloads_unload_all_reps(esw);
5896b972 2287 mlx5_eswitch_disable_pf_vf_vports(esw);
332bd3a5 2288 esw_set_passing_vport_metadata(esw, false);
eca8cc38 2289 esw_offloads_steering_cleanup(esw);
8463daf1 2290 mlx5_rdma_disable_roce(esw->dev);
9a64144d 2291 esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
c930a3ad
OG
2292}
2293
ef78618b 2294static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
c930a3ad
OG
2295{
2296 switch (mode) {
2297 case DEVLINK_ESWITCH_MODE_LEGACY:
f6455de0 2298 *mlx5_mode = MLX5_ESWITCH_LEGACY;
c930a3ad
OG
2299 break;
2300 case DEVLINK_ESWITCH_MODE_SWITCHDEV:
f6455de0 2301 *mlx5_mode = MLX5_ESWITCH_OFFLOADS;
c930a3ad
OG
2302 break;
2303 default:
2304 return -EINVAL;
2305 }
2306
2307 return 0;
2308}
2309
ef78618b
OG
2310static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
2311{
2312 switch (mlx5_mode) {
f6455de0 2313 case MLX5_ESWITCH_LEGACY:
ef78618b
OG
2314 *mode = DEVLINK_ESWITCH_MODE_LEGACY;
2315 break;
f6455de0 2316 case MLX5_ESWITCH_OFFLOADS:
ef78618b
OG
2317 *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
2318 break;
2319 default:
2320 return -EINVAL;
2321 }
2322
2323 return 0;
2324}
2325
bffaa916
RD
2326static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
2327{
2328 switch (mode) {
2329 case DEVLINK_ESWITCH_INLINE_MODE_NONE:
2330 *mlx5_mode = MLX5_INLINE_MODE_NONE;
2331 break;
2332 case DEVLINK_ESWITCH_INLINE_MODE_LINK:
2333 *mlx5_mode = MLX5_INLINE_MODE_L2;
2334 break;
2335 case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
2336 *mlx5_mode = MLX5_INLINE_MODE_IP;
2337 break;
2338 case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
2339 *mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
2340 break;
2341 default:
2342 return -EINVAL;
2343 }
2344
2345 return 0;
2346}
2347
2348static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
2349{
2350 switch (mlx5_mode) {
2351 case MLX5_INLINE_MODE_NONE:
2352 *mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
2353 break;
2354 case MLX5_INLINE_MODE_L2:
2355 *mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
2356 break;
2357 case MLX5_INLINE_MODE_IP:
2358 *mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
2359 break;
2360 case MLX5_INLINE_MODE_TCP_UDP:
2361 *mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
2362 break;
2363 default:
2364 return -EINVAL;
2365 }
2366
2367 return 0;
2368}
2369
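/* Common devlink precondition checks: an ethernet port, eswitch
 * manager privilege, and an initialized eswitch (unless the ECPF
 * manages it).
 */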
9d1cef19 2370static int mlx5_devlink_eswitch_check(struct devlink *devlink)
feae9087 2371{
9d1cef19 2372 struct mlx5_core_dev *dev = devlink_priv(devlink);
c930a3ad 2373
9d1cef19
OG
2374 if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
2375 return -EOPNOTSUPP;
c930a3ad 2376
733d3e54
OG
2377 if (!MLX5_ESWITCH_MANAGER(dev))
2378 return -EPERM;
c930a3ad 2379
f6455de0 2380 if (dev->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
c96692fb 2381 !mlx5_core_is_ecpf_esw_manager(dev))
c930a3ad
OG
2382 return -EOPNOTSUPP;
2383
9d1cef19
OG
2384 return 0;
2385}
2386
db7ff19e
EB
2387int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
2388 struct netlink_ext_ack *extack)
9d1cef19
OG
2389{
2390 struct mlx5_core_dev *dev = devlink_priv(devlink);
2391 u16 cur_mlx5_mode, mlx5_mode = 0;
2392 int err;
2393
2394 err = mlx5_devlink_eswitch_check(devlink);
2395 if (err)
2396 return err;
2397
2398 cur_mlx5_mode = dev->priv.eswitch->mode;
2399
ef78618b 2400 if (esw_mode_from_devlink(mode, &mlx5_mode))
c930a3ad
OG
2401 return -EINVAL;
2402
2403 if (cur_mlx5_mode == mlx5_mode)
2404 return 0;
2405
2406 if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
db7ff19e 2407 return esw_offloads_start(dev->priv.eswitch, extack);
c930a3ad 2408 else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
db7ff19e 2409 return esw_offloads_stop(dev->priv.eswitch, extack);
c930a3ad
OG
2410 else
2411 return -EINVAL;
feae9087
OG
2412}
2413
2414int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
2415{
9d1cef19
OG
2416 struct mlx5_core_dev *dev = devlink_priv(devlink);
2417 int err;
c930a3ad 2418
9d1cef19
OG
2419 err = mlx5_devlink_eswitch_check(devlink);
2420 if (err)
2421 return err;
c930a3ad 2422
ef78618b 2423 return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
feae9087 2424}
127ea380 2425
db7ff19e
EB
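/* The inline mode can only be set when the NIC reports per-vport-context
 * inline mode and no offloaded flows exist; the new mode is applied to
 * every host function vport and rolled back on error.
 */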
2426int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
2427 struct netlink_ext_ack *extack)
bffaa916
RD
2428{
2429 struct mlx5_core_dev *dev = devlink_priv(devlink);
2430 struct mlx5_eswitch *esw = dev->priv.eswitch;
db68cc56 2431 int err, vport, num_vport;
bffaa916
RD
2432 u8 mlx5_mode;
2433
9d1cef19
OG
2434 err = mlx5_devlink_eswitch_check(devlink);
2435 if (err)
2436 return err;
bffaa916 2437
c415f704
OG
2438 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
2439 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
2440 if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
2441 return 0;
2442 /* fall through */
2443 case MLX5_CAP_INLINE_MODE_L2:
8c98ee77 2444 NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
bffaa916 2445 return -EOPNOTSUPP;
c415f704
OG
2446 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
2447 break;
2448 }
bffaa916 2449
525e84be 2450 if (atomic64_read(&esw->offloads.num_flows) > 0) {
8c98ee77
EB
2451 NL_SET_ERR_MSG_MOD(extack,
2452 "Can't set inline mode when flows are configured");
375f51e2
RD
2453 return -EOPNOTSUPP;
2454 }
2455
bffaa916
RD
2456 err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
2457 if (err)
2458 goto out;
2459
411ec9e0 2460 mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
bffaa916
RD
2461 err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
2462 if (err) {
8c98ee77
EB
2463 NL_SET_ERR_MSG_MOD(extack,
2464 "Failed to set min inline on vport");
bffaa916
RD
2465 goto revert_inline_mode;
2466 }
2467 }
2468
2469 esw->offloads.inline_mode = mlx5_mode;
2470 return 0;
2471
2472revert_inline_mode:
db68cc56 2473 num_vport = --vport;
411ec9e0 2474 mlx5_esw_for_each_host_func_vport_reverse(esw, vport, num_vport)
bffaa916
RD
2475 mlx5_modify_nic_vport_min_inline(dev,
2476 vport,
2477 esw->offloads.inline_mode);
2478out:
2479 return err;
2480}
2481
2482int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
2483{
2484 struct mlx5_core_dev *dev = devlink_priv(devlink);
2485 struct mlx5_eswitch *esw = dev->priv.eswitch;
9d1cef19 2486 int err;
bffaa916 2487
9d1cef19
OG
2488 err = mlx5_devlink_eswitch_check(devlink);
2489 if (err)
2490 return err;
bffaa916 2491
bffaa916
RD
2492 return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
2493}
2494
062f4bf4 2495int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
bffaa916 2496{
c415f704 2497 u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
bffaa916
RD
2498 struct mlx5_core_dev *dev = esw->dev;
2499 int vport;
bffaa916
RD
2500
2501 if (!MLX5_CAP_GEN(dev, vport_group_manager))
2502 return -EOPNOTSUPP;
2503
f6455de0 2504 if (esw->mode == MLX5_ESWITCH_NONE)
bffaa916
RD
2505 return -EOPNOTSUPP;
2506
c415f704
OG
2507 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
2508 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
2509 mlx5_mode = MLX5_INLINE_MODE_NONE;
2510 goto out;
2511 case MLX5_CAP_INLINE_MODE_L2:
2512 mlx5_mode = MLX5_INLINE_MODE_L2;
2513 goto out;
2514 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
2515 goto query_vports;
2516 }
bffaa916 2517
c415f704 2518query_vports:
411ec9e0
BW
2519 mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
2520 mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
bffaa916 2521 mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
411ec9e0 2522 if (prev_mlx5_mode != mlx5_mode)
bffaa916
RD
2523 return -EINVAL;
2524 prev_mlx5_mode = mlx5_mode;
2525 }
2526
c415f704 2527out:
bffaa916
RD
2528 *mode = mlx5_mode;
2529 return 0;
2530}
2531
98fdbea5
LR
2532int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
2533 enum devlink_eswitch_encap_mode encap,
db7ff19e 2534 struct netlink_ext_ack *extack)
7768d197
RD
2535{
2536 struct mlx5_core_dev *dev = devlink_priv(devlink);
2537 struct mlx5_eswitch *esw = dev->priv.eswitch;
2538 int err;
2539
9d1cef19
OG
2540 err = mlx5_devlink_eswitch_check(devlink);
2541 if (err)
2542 return err;
7768d197
RD
2543
2544 if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
60786f09 2545 (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
7768d197
RD
2546 !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
2547 return -EOPNOTSUPP;
2548
2549 if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
2550 return -EOPNOTSUPP;
2551
f6455de0 2552 if (esw->mode == MLX5_ESWITCH_LEGACY) {
7768d197
RD
2553 esw->offloads.encap = encap;
2554 return 0;
2555 }
2556
2557 if (esw->offloads.encap == encap)
2558 return 0;
2559
525e84be 2560 if (atomic64_read(&esw->offloads.num_flows) > 0) {
8c98ee77
EB
2561 NL_SET_ERR_MSG_MOD(extack,
2562 "Can't set encapsulation when flows are configured");
7768d197
RD
2563 return -EOPNOTSUPP;
2564 }
2565
e52c2802 2566 esw_destroy_offloads_fdb_tables(esw);
7768d197
RD
2567
2568 esw->offloads.encap = encap;
e52c2802
PB
2569
2570 err = esw_create_offloads_fdb_tables(esw, esw->nvports);
2571
7768d197 2572 if (err) {
8c98ee77
EB
2573 NL_SET_ERR_MSG_MOD(extack,
2574 "Failed re-creating fast FDB table");
7768d197 2575 esw->offloads.encap = !encap;
e52c2802 2576 (void)esw_create_offloads_fdb_tables(esw, esw->nvports);
7768d197 2577 }
e52c2802 2578
7768d197
RD
2579 return err;
2580}
2581
98fdbea5
LR
2582int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
2583 enum devlink_eswitch_encap_mode *encap)
7768d197
RD
2584{
2585 struct mlx5_core_dev *dev = devlink_priv(devlink);
2586 struct mlx5_eswitch *esw = dev->priv.eswitch;
9d1cef19 2587 int err;
7768d197 2588
9d1cef19
OG
2589 err = mlx5_devlink_eswitch_check(devlink);
2590 if (err)
2591 return err;
7768d197
RD
2592
2593 *encap = esw->offloads.encap;
2594 return 0;
2595}
2596
f8e8fa02 2597void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
8693115a 2598 const struct mlx5_eswitch_rep_ops *ops,
f8e8fa02 2599 u8 rep_type)
127ea380 2600{
8693115a 2601 struct mlx5_eswitch_rep_data *rep_data;
f8e8fa02
BW
2602 struct mlx5_eswitch_rep *rep;
2603 int i;
9deb2241 2604
8693115a 2605 esw->offloads.rep_ops[rep_type] = ops;
f8e8fa02 2606 mlx5_esw_for_all_reps(esw, i, rep) {
8693115a
PP
2607 rep_data = &rep->rep_data[rep_type];
2608 atomic_set(&rep_data->state, REP_REGISTERED);
f8e8fa02 2609 }
127ea380 2610}
f8e8fa02 2611EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
127ea380 2612
f8e8fa02 2613void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
127ea380 2614{
cb67b832 2615 struct mlx5_eswitch_rep *rep;
f8e8fa02 2616 int i;
cb67b832 2617
f6455de0 2618 if (esw->mode == MLX5_ESWITCH_OFFLOADS)
062f4bf4 2619 __unload_reps_all_vport(esw, rep_type);
127ea380 2620
f8e8fa02 2621 mlx5_esw_for_all_reps(esw, i, rep)
8693115a 2622 atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
127ea380 2623}
f8e8fa02 2624EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
726293f1 2625
a4b97ab4 2626void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
726293f1 2627{
726293f1
HHZ
2628 struct mlx5_eswitch_rep *rep;
2629
879c8f84 2630 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
8693115a 2631 return rep->rep_data[rep_type].priv;
726293f1 2632}
22215908
MB
2633
2634void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
02f3afd9 2635 u16 vport,
22215908
MB
2636 u8 rep_type)
2637{
22215908
MB
2638 struct mlx5_eswitch_rep *rep;
2639
879c8f84 2640 rep = mlx5_eswitch_get_rep(esw, vport);
22215908 2641
8693115a
PP
2642 if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
2643 esw->offloads.rep_ops[rep_type]->get_proto_dev)
2644 return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
22215908
MB
2645 return NULL;
2646}
57cbd893 2647EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);
22215908
MB
2648
2649void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
2650{
879c8f84 2651 return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
22215908 2652}
57cbd893
MB
2653EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);
2654
2655struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
02f3afd9 2656 u16 vport)
57cbd893 2657{
879c8f84 2658 return mlx5_eswitch_get_rep(esw, vport);
57cbd893
MB
2659}
2660EXPORT_SYMBOL(mlx5_eswitch_vport_rep);
91d6291c
PP
2661
2662bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
2663{
2664 return vport_num >= MLX5_VPORT_FIRST_VF &&
2665 vport_num <= esw->dev->priv.sriov.max_vfs;
2666}
7445cfb1
JL
2667
2668bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
2669{
2670 return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
2671}
2672EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);
2673
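/* Match metadata layout: the low 16 bits of the vhca_id in the upper
 * half of the value, the vport number in the lower half.
 */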
2674u32 mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw,
2675 u16 vport_num)
2676{
2677 return ((MLX5_CAP_GEN(esw->dev, vhca_id) & 0xffff) << 16) | vport_num;
2678}
2679EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);