/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "lib/eq.h"
#include "eswitch.h"
#include "fs_core.h"
#include "ecpf.h"

enum {
	MLX5_ACTION_NONE = 0,
	MLX5_ACTION_ADD  = 1,
	MLX5_ACTION_DEL  = 2,
};
/* Vport UC/MC hash node */
struct vport_addr {
	struct l2addr_node node;
	u8 action;
	u16 vport;
	struct mlx5_flow_handle *flow_rule;
	bool mpfs; /* UC MAC was added to MPFS */
	/* A flag indicating that the MAC was added due to a mc promiscuous vport */
	bool mc_promisc;
};

static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw);
static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw);

struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	u16 idx;

	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
		return ERR_PTR(-EPERM);

	idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);

	if (idx > esw->total_vports - 1) {
		esw_debug(esw->dev, "vport out of range: num(0x%x), idx(0x%x)\n",
			  vport_num, idx);
		return ERR_PTR(-EINVAL);
	}

	return &esw->vports[idx];
}

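/* Usage sketch (editorial, not taken from this file): the return value is
 * encoded with ERR_PTR() on failure, so callers must test it with IS_ERR()
 * before dereferencing:
 *
 *	struct mlx5_vport *vport;
 *
 *	vport = mlx5_eswitch_get_vport(esw, vport_num);
 *	if (IS_ERR(vport))
 *		return PTR_ERR(vport);
 */
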
static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
					u32 events_mask)
{
	int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
	int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
	void *nic_vport_ctx;

	MLX5_SET(modify_nic_vport_context_in, in,
		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);

	if (events_mask & MLX5_VPORT_UC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_uc_address_change, 1);
	if (events_mask & MLX5_VPORT_MC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_mc_address_change, 1);
	if (events_mask & MLX5_VPORT_PROMISC_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_promisc_change, 1);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

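/* Example (editorial sketch, using the MLX5_VPORT_*_CHANGE flags already
 * used above): arming a vport for unicast and multicast address-list
 * change events only:
 *
 *	err = arm_vport_context_events_cmd(dev, vport_num,
 *					   MLX5_VPORT_UC_ADDR_CHANGE |
 *					   MLX5_VPORT_MC_ADDR_CHANGE);
 *
 * The arm is one-shot: after the device raises the change event, the
 * handler must re-arm, as esw_vport_change_handle_locked() does below.
 */
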
/* E-Switch vport context HW commands */
int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
					  bool other_vport,
					  void *in, int inlen)
{
	u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};

	MLX5_SET(modify_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_esw_vport_context_in, in, other_vport, other_vport);
	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}

int mlx5_eswitch_query_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
					 bool other_vport,
					 void *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};

	MLX5_SET(query_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
	MLX5_SET(query_esw_vport_context_in, in, vport_number, vport);
	MLX5_SET(query_esw_vport_context_in, in, other_vport, other_vport);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}

static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
				  u16 vlan, u8 qos, u8 set_flags)
{
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0};

	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
		return -EOPNOTSUPP;

	esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
		  vport, vlan, qos, set_flags);

	if (set_flags & SET_VLAN_STRIP)
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_strip, 1);

	if (set_flags & SET_VLAN_INSERT) {
		/* insert only if no vlan in packet */
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_insert, 1);

		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_pcp, qos);
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_id, vlan);
	}

	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_strip, 1);
	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_insert, 1);

	return mlx5_eswitch_modify_esw_vport_context(dev, vport, true,
						     in, sizeof(in));
}

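/* Example (editorial sketch): programming VST vlan 100 with priority 3
 * updates both cvlan features in a single command:
 *
 *	err = modify_esw_vport_cvlan(dev, vport_num, 100, 3,
 *				     SET_VLAN_STRIP | SET_VLAN_INSERT);
 *
 * Passing set_flags == 0 clears both strip and insert: the field_select
 * bits are always set, while the zero-initialized context bits disable
 * the features.
 */
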
/* E-Switch FDB */
static struct mlx5_flow_handle *
__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u16 vport, bool rx_rule,
			 u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
{
	int match_header = (is_zero_ether_addr(mac_c) ? 0 :
			    MLX5_MATCH_OUTER_HEADERS);
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_spec *spec;
	void *mv_misc = NULL;
	void *mc_misc = NULL;
	u8 *dmac_v = NULL;
	u8 *dmac_c = NULL;

	if (rx_rule)
		match_header |= MLX5_MATCH_MISC_PARAMETERS;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return NULL;

	dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			      outer_headers.dmac_47_16);
	dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			      outer_headers.dmac_47_16);

	if (match_header & MLX5_MATCH_OUTER_HEADERS) {
		ether_addr_copy(dmac_v, mac_v);
		ether_addr_copy(dmac_c, mac_c);
	}

	if (match_header & MLX5_MATCH_MISC_PARAMETERS) {
		mv_misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       misc_parameters);
		mc_misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       misc_parameters);
		MLX5_SET(fte_match_set_misc, mv_misc, source_port, MLX5_VPORT_UPLINK);
		MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;

	esw_debug(esw->dev,
		  "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
		  dmac_v, dmac_c, vport);
	spec->match_criteria_enable = match_header;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule =
		mlx5_add_flow_rules(esw->fdb_table.legacy.fdb, spec,
				    &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev,
			 "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
			 dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
		flow_rule = NULL;
	}

	kvfree(spec);
	return flow_rule;
}

static struct mlx5_flow_handle *
esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u16 vport)
{
	u8 mac_c[ETH_ALEN];

	eth_broadcast_addr(mac_c);
	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac);
}

static struct mlx5_flow_handle *
esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u16 vport)
{
	u8 mac_c[ETH_ALEN];
	u8 mac_v[ETH_ALEN];

	eth_zero_addr(mac_c);
	eth_zero_addr(mac_v);
	mac_c[0] = 0x01;
	mac_v[0] = 0x01;
	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v);
}

static struct mlx5_flow_handle *
esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u16 vport)
{
	u8 mac_c[ETH_ALEN];
	u8 mac_v[ETH_ALEN];

	eth_zero_addr(mac_c);
	eth_zero_addr(mac_v);
	return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
}

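/* Summary (editorial note): the three wrappers above produce the three
 * flavors of legacy FDB rules:
 *
 *	exact UC/MC:	mask ff:ff:ff:ff:ff:ff, value <mac>       -> vport
 *	allmulti:	mask 01:00:00:00:00:00, value 01:00:...   -> vport
 *	promisc rx:	no DMAC match, misc source_port == uplink -> vport
 *
 * The promisc rule is an rx_rule: it catches traffic arriving from the
 * uplink that matched none of the preceding groups.
 */
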
enum {
	LEGACY_VEPA_PRIO = 0,
	LEGACY_FDB_PRIO,
};

static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb;
	int err;

	root_ns = mlx5_get_fdb_sub_ns(dev, 0);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		return -EOPNOTSUPP;
	}

	/* num FTE 2, num FG 2 */
	ft_attr.prio = LEGACY_VEPA_PRIO;
	ft_attr.max_fte = 2;
	ft_attr.autogroup.max_num_groups = 2;
	fdb = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create VEPA FDB err %d\n", err);
		return err;
	}
	esw->fdb_table.legacy.vepa_fdb = fdb;

	return 0;
}

static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb;
	struct mlx5_flow_group *g;
	void *match_criteria;
	int table_size;
	u32 *flow_group_in;
	u8 *dmac;
	int err = 0;

	esw_debug(dev, "Create FDB log_max_size(%d)\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	root_ns = mlx5_get_fdb_sub_ns(dev, 0);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		return -EOPNOTSUPP;
	}

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
	ft_attr.max_fte = table_size;
	ft_attr.prio = LEGACY_FDB_PRIO;
	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create FDB Table err %d\n", err);
		goto out;
	}
	esw->fdb_table.legacy.fdb = fdb;

	/* Addresses group : Full match unicast/multicast addresses */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	/* Preserve 2 entries for allmulti and promisc rules */
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
	eth_broadcast_addr(dmac);
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.addr_grp = g;

	/* Allmulti group : One rule that forwards any mcast traffic */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2);
	eth_zero_addr(dmac);
	dmac[0] = 0x01;
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.allmulti_grp = g;

	/* Promiscuous group :
	 * One rule that forwards all unmatched traffic from previous groups
	 */
	eth_zero_addr(dmac);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.promisc_grp = g;

out:
	if (err)
		esw_destroy_legacy_fdb_table(esw);

	kvfree(flow_group_in);
	return err;
}

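/* Layout note (editorial, derived from the code above): with table_size
 * entries the legacy FDB is carved up as
 *
 *	[0 .. table_size-3]	addr_grp     - exact DMAC matches
 *	[table_size-2]		allmulti_grp - single allmulti rule
 *	[table_size-1]		promisc_grp  - single source_port catch-all
 *
 * which is why the addresses group reserves the last two flow indexes.
 */
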
static void esw_destroy_legacy_vepa_table(struct mlx5_eswitch *esw)
{
	esw_debug(esw->dev, "Destroy VEPA Table\n");
	if (!esw->fdb_table.legacy.vepa_fdb)
		return;

	mlx5_destroy_flow_table(esw->fdb_table.legacy.vepa_fdb);
	esw->fdb_table.legacy.vepa_fdb = NULL;
}

static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
{
	esw_debug(esw->dev, "Destroy FDB Table\n");
	if (!esw->fdb_table.legacy.fdb)
		return;

	if (esw->fdb_table.legacy.promisc_grp)
		mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
	if (esw->fdb_table.legacy.allmulti_grp)
		mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
	if (esw->fdb_table.legacy.addr_grp)
		mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
	mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb);

	esw->fdb_table.legacy.fdb = NULL;
	esw->fdb_table.legacy.addr_grp = NULL;
	esw->fdb_table.legacy.allmulti_grp = NULL;
	esw->fdb_table.legacy.promisc_grp = NULL;
}

static int esw_create_legacy_table(struct mlx5_eswitch *esw)
{
	int err;

	memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb));

	err = esw_create_legacy_vepa_table(esw);
	if (err)
		return err;

	err = esw_create_legacy_fdb_table(esw);
	if (err)
		esw_destroy_legacy_vepa_table(esw);

	return err;
}

static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
{
	esw_cleanup_vepa_rules(esw);
	esw_destroy_legacy_fdb_table(esw);
	esw_destroy_legacy_vepa_table(esw);
}

#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \
					MLX5_VPORT_MC_ADDR_CHANGE | \
					MLX5_VPORT_PROMISC_CHANGE)

static int esw_legacy_enable(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	int ret, i;

	ret = esw_create_legacy_table(esw);
	if (ret)
		return ret;

	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;

	ret = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
	if (ret)
		esw_destroy_legacy_table(esw);
	return ret;
}

static void esw_legacy_disable(struct mlx5_eswitch *esw)
{
	struct esw_mc_addr *mc_promisc;

	mlx5_eswitch_disable_pf_vf_vports(esw);

	mc_promisc = &esw->mc_promisc;
	if (mc_promisc->uplink_rule)
		mlx5_del_flow_rules(mc_promisc->uplink_rule);

	esw_destroy_legacy_table(esw);
}

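/* Flow note (editorial): esw_legacy_enable() builds the VEPA and legacy
 * FDB tables first, defaults every VF's administrative link state to
 * AUTO, and only then enables the PF/VF vports with the event mask
 * above, so the first UC/MC/promisc change event already finds the FDB
 * in place.
 */
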
/* E-Switch vport UC/MC lists management */
typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
				 struct vport_addr *vaddr);

static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;
	int err;

	/* Skip mlx5_mpfs_add_mac for eswitch managers,
	 * it is already done by their netdev in mlx5e_execute_l2_action
	 */
	if (mlx5_esw_is_manager_vport(esw, vport))
		goto fdb_add;

	err = mlx5_mpfs_add_mac(esw->dev, mac);
	if (err) {
		esw_warn(esw->dev,
			 "Failed to add L2 table mac(%pM) for vport(0x%x), err(%d)\n",
			 mac, vport, err);
		return err;
	}
	vaddr->mpfs = true;

fdb_add:
	/* SRIOV is enabled: Forward UC MAC to vport */
	if (esw->fdb_table.legacy.fdb && esw->mode == MLX5_ESWITCH_LEGACY)
		vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);

	esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n",
		  vport, mac, vaddr->flow_rule);

	return 0;
}

static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;
	int err = 0;

	/* Skip mlx5_mpfs_del_mac for eswitch managers,
	 * it is already done by their netdev in mlx5e_execute_l2_action
	 */
	if (!vaddr->mpfs || mlx5_esw_is_manager_vport(esw, vport))
		goto fdb_del;

	err = mlx5_mpfs_del_mac(esw->dev, mac);
	if (err)
		esw_warn(esw->dev,
			 "Failed to del L2 table mac(%pM) for vport(%d), err(%d)\n",
			 mac, vport, err);
	vaddr->mpfs = false;

fdb_del:
	if (vaddr->flow_rule)
		mlx5_del_flow_rules(vaddr->flow_rule);
	vaddr->flow_rule = NULL;

	return 0;
}

static void update_allmulti_vports(struct mlx5_eswitch *esw,
				   struct vport_addr *vaddr,
				   struct esw_mc_addr *esw_mc)
{
	u8 *mac = vaddr->node.addr;
	struct mlx5_vport *vport;
	u16 i, vport_num;

	mlx5_esw_for_all_vports(esw, i, vport) {
		struct hlist_head *vport_hash = vport->mc_list;
		struct vport_addr *iter_vaddr =
					l2addr_hash_find(vport_hash,
							 mac,
							 struct vport_addr);
		vport_num = vport->vport;
		if (IS_ERR_OR_NULL(vport->allmulti_rule) ||
		    vaddr->vport == vport_num)
			continue;
		switch (vaddr->action) {
		case MLX5_ACTION_ADD:
			if (iter_vaddr)
				continue;
			iter_vaddr = l2addr_hash_add(vport_hash, mac,
						     struct vport_addr,
						     GFP_KERNEL);
			if (!iter_vaddr) {
				esw_warn(esw->dev,
					 "ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n",
					 mac, vport_num);
				continue;
			}
			iter_vaddr->vport = vport_num;
			iter_vaddr->flow_rule =
					esw_fdb_set_vport_rule(esw,
							       mac,
							       vport_num);
			iter_vaddr->mc_promisc = true;
			break;
		case MLX5_ACTION_DEL:
			if (!iter_vaddr)
				continue;
			mlx5_del_flow_rules(iter_vaddr->flow_rule);
			l2addr_hash_del(iter_vaddr);
			break;
		}
	}
}

static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;

	if (!esw->fdb_table.legacy.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (esw_mc)
		goto add;

	esw_mc = l2addr_hash_add(hash, mac, struct esw_mc_addr, GFP_KERNEL);
	if (!esw_mc)
		return -ENOMEM;

	esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
		esw_fdb_set_vport_rule(esw, mac, MLX5_VPORT_UPLINK);

	/* Add this multicast mac to all the mc promiscuous vports */
	update_allmulti_vports(esw, vaddr, esw_mc);

add:
	/* If the multicast mac is added as a result of mc promiscuous vport,
	 * don't increment the multicast ref count
	 */
	if (!vaddr->mc_promisc)
		esw_mc->refcnt++;

	/* Forward MC MAC to vport */
	vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
	esw_debug(esw->dev,
		  "\tADDED MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule,
		  esw_mc->refcnt, esw_mc->uplink_rule);
	return 0;
}

static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;

	if (!esw->fdb_table.legacy.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (!esw_mc) {
		esw_warn(esw->dev,
			 "Failed to find eswitch MC addr for MAC(%pM) vport(%d)",
			 mac, vport);
		return -EINVAL;
	}
	esw_debug(esw->dev,
		  "\tDELETE MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule, esw_mc->refcnt,
		  esw_mc->uplink_rule);

	if (vaddr->flow_rule)
		mlx5_del_flow_rules(vaddr->flow_rule);
	vaddr->flow_rule = NULL;

	/* If the multicast mac is added as a result of mc promiscuous vport,
	 * don't decrement the multicast ref count.
	 */
	if (vaddr->mc_promisc || (--esw_mc->refcnt > 0))
		return 0;

	/* Remove this multicast mac from all the mc promiscuous vports */
	update_allmulti_vports(esw, vaddr, esw_mc);

	if (esw_mc->uplink_rule)
		mlx5_del_flow_rules(esw_mc->uplink_rule);

	l2addr_hash_del(esw_mc);
	return 0;
}

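/* Refcount note (editorial): esw_mc->refcnt counts only vports that
 * explicitly subscribed to the MC MAC; entries that exist purely because
 * a vport is mc-promiscuous (vaddr->mc_promisc) never touch the count.
 * The uplink rule and the hash entry are therefore torn down only when
 * the last explicit subscriber goes away.
 */
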
/* Apply vport UC/MC list to HW l2 table and FDB table */
static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport, int list_type)
{
	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
	vport_addr_action vport_addr_add;
	vport_addr_action vport_addr_del;
	struct vport_addr *addr;
	struct l2addr_node *node;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int hi;

	vport_addr_add = is_uc ? esw_add_uc_addr :
				 esw_add_mc_addr;
	vport_addr_del = is_uc ? esw_del_uc_addr :
				 esw_del_mc_addr;

	hash = is_uc ? vport->uc_list : vport->mc_list;
	for_each_l2hash_node(node, tmp, hash, hi) {
		addr = container_of(node, struct vport_addr, node);
		switch (addr->action) {
		case MLX5_ACTION_ADD:
			vport_addr_add(esw, addr);
			addr->action = MLX5_ACTION_NONE;
			break;
		case MLX5_ACTION_DEL:
			vport_addr_del(esw, addr);
			l2addr_hash_del(addr);
			break;
		}
	}
}

/* Sync vport UC/MC list from vport context */
static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
				       struct mlx5_vport *vport, int list_type)
{
	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
	u8 (*mac_list)[ETH_ALEN];
	struct l2addr_node *node;
	struct vport_addr *addr;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int size;
	int err;
	int hi;
	int i;

	size = is_uc ? MLX5_MAX_UC_PER_VPORT(esw->dev) :
		       MLX5_MAX_MC_PER_VPORT(esw->dev);

	mac_list = kcalloc(size, ETH_ALEN, GFP_KERNEL);
	if (!mac_list)
		return;

	hash = is_uc ? vport->uc_list : vport->mc_list;

	for_each_l2hash_node(node, tmp, hash, hi) {
		addr = container_of(node, struct vport_addr, node);
		addr->action = MLX5_ACTION_DEL;
	}

	if (!vport->enabled)
		goto out;

	err = mlx5_query_nic_vport_mac_list(esw->dev, vport->vport, list_type,
					    mac_list, &size);
	if (err)
		goto out;
	esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
		  vport->vport, is_uc ? "UC" : "MC", size);

	for (i = 0; i < size; i++) {
		if (is_uc && !is_valid_ether_addr(mac_list[i]))
			continue;

		if (!is_uc && !is_multicast_ether_addr(mac_list[i]))
			continue;

		addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr);
		if (addr) {
			addr->action = MLX5_ACTION_NONE;
			/* If this mac was previously added because of allmulti
			 * promiscuous rx mode, it's now converted to be an
			 * original vport mac.
			 */
			if (addr->mc_promisc) {
				struct esw_mc_addr *esw_mc =
					l2addr_hash_find(esw->mc_table,
							 mac_list[i],
							 struct esw_mc_addr);
				if (!esw_mc) {
					esw_warn(esw->dev,
						 "Failed to find MAC(%pM) in mcast DB\n",
						 mac_list[i]);
					continue;
				}
				esw_mc->refcnt++;
				addr->mc_promisc = false;
			}
			continue;
		}

		addr = l2addr_hash_add(hash, mac_list[i], struct vport_addr,
				       GFP_KERNEL);
		if (!addr) {
			esw_warn(esw->dev,
				 "Failed to add MAC(%pM) to vport[%d] DB\n",
				 mac_list[i], vport->vport);
			continue;
		}
		addr->vport = vport->vport;
		addr->action = MLX5_ACTION_ADD;
	}
out:
	kfree(mac_list);
}

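/* Sync-algorithm note (editorial): the update is a mark-and-sweep pass.
 * Every cached entry is first marked MLX5_ACTION_DEL; entries still
 * present in the freshly queried list are downgraded to
 * MLX5_ACTION_NONE, new ones are inserted as MLX5_ACTION_ADD, and
 * esw_apply_vport_addr_list() later executes whatever actions remain.
 */
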
/* Sync vport MC promiscuous list from the eswitch mcast table.
 * Must be called after esw_update_vport_addr_list
 */
static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw,
					struct mlx5_vport *vport)
{
	struct l2addr_node *node;
	struct vport_addr *addr;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int hi;

	hash = vport->mc_list;

	for_each_l2hash_node(node, tmp, esw->mc_table, hi) {
		u8 *mac = node->addr;

		addr = l2addr_hash_find(hash, mac, struct vport_addr);
		if (addr) {
			if (addr->action == MLX5_ACTION_DEL)
				addr->action = MLX5_ACTION_NONE;
			continue;
		}
		addr = l2addr_hash_add(hash, mac, struct vport_addr,
				       GFP_KERNEL);
		if (!addr) {
			esw_warn(esw->dev,
				 "Failed to add allmulti MAC(%pM) to vport[%d] DB\n",
				 mac, vport->vport);
			continue;
		}
		addr->vport = vport->vport;
		addr->action = MLX5_ACTION_ADD;
		addr->mc_promisc = true;
	}
}

/* Apply vport rx mode to HW FDB table */
static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport,
				    bool promisc, bool mc_promisc)
{
	struct esw_mc_addr *allmulti_addr = &esw->mc_promisc;

	if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc)
		goto promisc;

	if (mc_promisc) {
		vport->allmulti_rule =
			esw_fdb_set_vport_allmulti_rule(esw, vport->vport);
		if (!allmulti_addr->uplink_rule)
			allmulti_addr->uplink_rule =
				esw_fdb_set_vport_allmulti_rule(esw,
								MLX5_VPORT_UPLINK);
		allmulti_addr->refcnt++;
	} else if (vport->allmulti_rule) {
		mlx5_del_flow_rules(vport->allmulti_rule);
		vport->allmulti_rule = NULL;

		if (--allmulti_addr->refcnt > 0)
			goto promisc;

		if (allmulti_addr->uplink_rule)
			mlx5_del_flow_rules(allmulti_addr->uplink_rule);
		allmulti_addr->uplink_rule = NULL;
	}

promisc:
	if (IS_ERR_OR_NULL(vport->promisc_rule) != promisc)
		return;

	if (promisc) {
		vport->promisc_rule =
			esw_fdb_set_vport_promisc_rule(esw, vport->vport);
	} else if (vport->promisc_rule) {
		mlx5_del_flow_rules(vport->promisc_rule);
		vport->promisc_rule = NULL;
	}
}

/* Sync vport rx mode from vport context */
static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport)
{
	int promisc_all = 0;
	int promisc_uc = 0;
	int promisc_mc = 0;
	int err;

	err = mlx5_query_nic_vport_promisc(esw->dev,
					   vport->vport,
					   &promisc_uc,
					   &promisc_mc,
					   &promisc_all);
	if (err)
		return;
	esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
		  vport->vport, promisc_all, promisc_mc);

	if (!vport->info.trusted || !vport->enabled) {
		promisc_uc = 0;
		promisc_mc = 0;
		promisc_all = 0;
	}

	esw_apply_vport_rx_mode(esw, vport, promisc_all,
				(promisc_all || promisc_mc));
}

static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
{
	struct mlx5_core_dev *dev = vport->dev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	u8 mac[ETH_ALEN];

	mlx5_query_nic_vport_mac_address(dev, vport->vport, true, mac);
	esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
		  vport->vport, mac);

	if (vport->enabled_events & MLX5_VPORT_UC_ADDR_CHANGE) {
		esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
		esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
	}

	if (vport->enabled_events & MLX5_VPORT_MC_ADDR_CHANGE)
		esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);

	if (vport->enabled_events & MLX5_VPORT_PROMISC_CHANGE) {
		esw_update_vport_rx_mode(esw, vport);
		if (!IS_ERR_OR_NULL(vport->allmulti_rule))
			esw_update_vport_mc_promisc(esw, vport);
	}

	if (vport->enabled_events & (MLX5_VPORT_PROMISC_CHANGE | MLX5_VPORT_MC_ADDR_CHANGE))
		esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);

	esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
	if (vport->enabled)
		arm_vport_context_events_cmd(dev, vport->vport,
					     vport->enabled_events);
}

static void esw_vport_change_handler(struct work_struct *work)
{
	struct mlx5_vport *vport =
		container_of(work, struct mlx5_vport, vport_change_handler);
	struct mlx5_eswitch *esw = vport->dev->priv.eswitch;

	mutex_lock(&esw->state_lock);
	esw_vport_change_handle_locked(vport);
	mutex_unlock(&esw->state_lock);
}

int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
				struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *vlan_grp = NULL;
	struct mlx5_flow_group *drop_grp = NULL;
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *acl;
	void *match_criteria;
	u32 *flow_group_in;
	/* The egress acl table contains 2 rules:
	 * 1)Allow traffic with vlan_tag=vst_vlan_id
	 * 2)Drop all other traffic.
	 */
	int table_size = 2;
	int err = 0;

	if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
		return -EOPNOTSUPP;

	if (!IS_ERR_OR_NULL(vport->egress.acl))
		return 0;

	esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
		  vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));

	root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS,
						    mlx5_eswitch_vport_num_to_index(esw, vport->vport));
	if (!root_ns) {
		esw_warn(dev, "Failed to get E-Switch egress flow namespace for vport (%d)\n", vport->vport);
		return -EOPNOTSUPP;
	}

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
	if (IS_ERR(acl)) {
		err = PTR_ERR(acl);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);

	vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR(vlan_grp)) {
		err = PTR_ERR(vlan_grp);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
	drop_grp = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR(drop_grp)) {
		err = PTR_ERR(drop_grp);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	vport->egress.acl = acl;
	vport->egress.drop_grp = drop_grp;
	vport->egress.allowed_vlans_grp = vlan_grp;
out:
	kvfree(flow_group_in);
	if (err && !IS_ERR_OR_NULL(vlan_grp))
		mlx5_destroy_flow_group(vlan_grp);
	if (err && !IS_ERR_OR_NULL(acl))
		mlx5_destroy_flow_table(acl);
	return err;
}

void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport)
{
	if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan)) {
		mlx5_del_flow_rules(vport->egress.allowed_vlan);
		vport->egress.allowed_vlan = NULL;
	}

	if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_rule)) {
		mlx5_del_flow_rules(vport->egress.legacy.drop_rule);
		vport->egress.legacy.drop_rule = NULL;
	}
}

void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
				  struct mlx5_vport *vport)
{
	if (IS_ERR_OR_NULL(vport->egress.acl))
		return;

	esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport);

	esw_vport_cleanup_egress_rules(esw, vport);
	mlx5_destroy_flow_group(vport->egress.allowed_vlans_grp);
	mlx5_destroy_flow_group(vport->egress.drop_grp);
	mlx5_destroy_flow_table(vport->egress.acl);
	vport->egress.allowed_vlans_grp = NULL;
	vport->egress.drop_grp = NULL;
	vport->egress.acl = NULL;
}

static int
esw_vport_create_legacy_ingress_acl_groups(struct mlx5_eswitch *esw,
					   struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u32 *flow_group_in;
	int err;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);

	g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "vport[%d] ingress create untagged spoofchk flow group, err(%d)\n",
			 vport->vport, err);
		goto spoof_err;
	}
	vport->ingress.legacy.allow_untagged_spoofchk_grp = g;

	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);

	g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
			 vport->vport, err);
		goto untagged_err;
	}
	vport->ingress.legacy.allow_untagged_only_grp = g;

	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);

	g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "vport[%d] ingress create spoofchk flow group, err(%d)\n",
			 vport->vport, err);
		goto allow_spoof_err;
	}
	vport->ingress.legacy.allow_spoofchk_only_grp = g;

	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);

	g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "vport[%d] ingress create drop flow group, err(%d)\n",
			 vport->vport, err);
		goto drop_err;
	}
	vport->ingress.legacy.drop_grp = g;
	kvfree(flow_group_in);
	return 0;

drop_err:
	if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_spoofchk_only_grp)) {
		mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp);
		vport->ingress.legacy.allow_spoofchk_only_grp = NULL;
	}
allow_spoof_err:
	if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_only_grp)) {
		mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp);
		vport->ingress.legacy.allow_untagged_only_grp = NULL;
	}
untagged_err:
	if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_spoofchk_grp)) {
		mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp);
		vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL;
	}
spoof_err:
	kvfree(flow_group_in);
	return err;
}

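/* Layout note (editorial): the four legacy ingress groups created above
 * occupy fixed flow indexes:
 *
 *	0: untagged + spoofchk	(cvlan_tag + smac)
 *	1: untagged only	(cvlan_tag)
 *	2: spoofchk only	(smac)
 *	3: drop			(no match criteria)
 *
 * esw_vport_ingress_config() installs one allow rule in whichever of the
 * first three groups applies, plus the drop rule in group 3.
 */
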
int esw_vport_create_ingress_acl_table(struct mlx5_eswitch *esw,
				       struct mlx5_vport *vport, int table_size)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *acl;
	int vport_index;
	int err;

	if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
		return -EOPNOTSUPP;

	esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
		  vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));

	vport_index = mlx5_eswitch_vport_num_to_index(esw, vport->vport);
	root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
						    vport_index);
	if (!root_ns) {
		esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n",
			 vport->vport);
		return -EOPNOTSUPP;
	}

	acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
	if (IS_ERR(acl)) {
		err = PTR_ERR(acl);
		esw_warn(dev, "vport[%d] ingress create flow Table, err(%d)\n",
			 vport->vport, err);
		return err;
	}
	vport->ingress.acl = acl;
	return 0;
}

void esw_vport_destroy_ingress_acl_table(struct mlx5_vport *vport)
{
	if (!vport->ingress.acl)
		return;

	mlx5_destroy_flow_table(vport->ingress.acl);
	vport->ingress.acl = NULL;
}

void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport)
{
	if (vport->ingress.legacy.drop_rule) {
		mlx5_del_flow_rules(vport->ingress.legacy.drop_rule);
		vport->ingress.legacy.drop_rule = NULL;
	}

	if (vport->ingress.allow_rule) {
		mlx5_del_flow_rules(vport->ingress.allow_rule);
		vport->ingress.allow_rule = NULL;
	}
}

static void esw_vport_disable_legacy_ingress_acl(struct mlx5_eswitch *esw,
						 struct mlx5_vport *vport)
{
	if (!vport->ingress.acl)
		return;

	esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);

	esw_vport_cleanup_ingress_rules(esw, vport);
	if (vport->ingress.legacy.allow_spoofchk_only_grp) {
		mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp);
		vport->ingress.legacy.allow_spoofchk_only_grp = NULL;
	}
	if (vport->ingress.legacy.allow_untagged_only_grp) {
		mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp);
		vport->ingress.legacy.allow_untagged_only_grp = NULL;
	}
	if (vport->ingress.legacy.allow_untagged_spoofchk_grp) {
		mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp);
		vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL;
	}
	if (vport->ingress.legacy.drop_grp) {
		mlx5_destroy_flow_group(vport->ingress.legacy.drop_grp);
		vport->ingress.legacy.drop_grp = NULL;
	}
	esw_vport_destroy_ingress_acl_table(vport);
}

static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport)
{
	struct mlx5_fc *counter = vport->ingress.legacy.drop_counter;
	struct mlx5_flow_destination drop_ctr_dst = {0};
	struct mlx5_flow_destination *dst = NULL;
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_spec *spec = NULL;
	int dest_num = 0;
	int err = 0;
	u8 *smac_v;

	/* The ingress acl table contains 4 groups
	 * (2 active rules at the same time -
	 *	1 allow rule from one of the first 3 groups.
	 *	1 drop rule from the last group):
	 * 1)Allow untagged traffic with smac=original mac.
	 * 2)Allow untagged traffic.
	 * 3)Allow traffic with smac=original mac.
	 * 4)Drop all other traffic.
	 */
	int table_size = 4;

	esw_vport_cleanup_ingress_rules(esw, vport);

	if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
		esw_vport_disable_legacy_ingress_acl(esw, vport);
		return 0;
	}

	if (!vport->ingress.acl) {
		err = esw_vport_create_ingress_acl_table(esw, vport, table_size);
		if (err) {
			esw_warn(esw->dev,
				 "vport[%d] enable ingress acl err (%d)\n",
				 vport->vport, err);
			return err;
		}

		err = esw_vport_create_legacy_ingress_acl_groups(esw, vport);
		if (err)
			goto out;
	}

	esw_debug(esw->dev,
		  "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
		  vport->vport, vport->info.vlan, vport->info.qos);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	if (vport->info.vlan || vport->info.qos)
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);

	if (vport->info.spoofchk) {
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0);
		smac_v = MLX5_ADDR_OF(fte_match_param,
				      spec->match_value,
				      outer_headers.smac_47_16);
		ether_addr_copy(smac_v, vport->info.mac);
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	vport->ingress.allow_rule =
		mlx5_add_flow_rules(vport->ingress.acl, spec,
				    &flow_act, NULL, 0);
	if (IS_ERR(vport->ingress.allow_rule)) {
		err = PTR_ERR(vport->ingress.allow_rule);
		esw_warn(esw->dev,
			 "vport[%d] configure ingress allow rule, err(%d)\n",
			 vport->vport, err);
		vport->ingress.allow_rule = NULL;
		goto out;
	}

	memset(spec, 0, sizeof(*spec));
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;

	/* Attach drop flow counter */
	if (counter) {
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
		drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		drop_ctr_dst.counter_id = mlx5_fc_id(counter);
		dst = &drop_ctr_dst;
		dest_num++;
	}
	vport->ingress.legacy.drop_rule =
		mlx5_add_flow_rules(vport->ingress.acl, spec,
				    &flow_act, dst, dest_num);
	if (IS_ERR(vport->ingress.legacy.drop_rule)) {
		err = PTR_ERR(vport->ingress.legacy.drop_rule);
		esw_warn(esw->dev,
			 "vport[%d] configure ingress drop rule, err(%d)\n",
			 vport->vport, err);
		vport->ingress.legacy.drop_rule = NULL;
		goto out;
	}
	kvfree(spec);
	return 0;

out:
	esw_vport_disable_legacy_ingress_acl(esw, vport);
	kvfree(spec);
	return err;
}

int mlx5_esw_create_vport_egress_acl_vlan(struct mlx5_eswitch *esw,
					  struct mlx5_vport *vport,
					  u16 vlan_id, u32 flow_action)
{
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_spec *spec;
	int err = 0;

	if (vport->egress.allowed_vlan)
		return -EEXIST;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vlan_id);

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	flow_act.action = flow_action;
	vport->egress.allowed_vlan =
		mlx5_add_flow_rules(vport->egress.acl, spec,
				    &flow_act, NULL, 0);
	if (IS_ERR(vport->egress.allowed_vlan)) {
		err = PTR_ERR(vport->egress.allowed_vlan);
		esw_warn(esw->dev,
			 "vport[%d] configure egress vlan rule failed, err(%d)\n",
			 vport->vport, err);
		vport->egress.allowed_vlan = NULL;
	}

	kvfree(spec);
	return err;
}

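/* Usage sketch (editorial): esw_vport_egress_config() below installs the
 * VST allow rule through this helper:
 *
 *	err = mlx5_esw_create_vport_egress_acl_vlan(esw, vport,
 *						    vport->info.vlan,
 *						    MLX5_FLOW_CONTEXT_ACTION_ALLOW);
 *
 * The helper refuses to install a second allowed-vlan rule (-EEXIST), so
 * callers must run esw_vport_cleanup_egress_rules() first when
 * reconfiguring.
 */
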
static int esw_vport_egress_config(struct mlx5_eswitch *esw,
				   struct mlx5_vport *vport)
{
	struct mlx5_fc *counter = vport->egress.legacy.drop_counter;
	struct mlx5_flow_destination drop_ctr_dst = {0};
	struct mlx5_flow_destination *dst = NULL;
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_spec *spec;
	int dest_num = 0;
	int err = 0;

	esw_vport_cleanup_egress_rules(esw, vport);

	if (!vport->info.vlan && !vport->info.qos) {
		esw_vport_disable_egress_acl(esw, vport);
		return 0;
	}

	err = esw_vport_enable_egress_acl(esw, vport);
	if (err) {
		mlx5_core_warn(esw->dev,
			       "failed to enable egress acl (%d) on vport[%d]\n",
			       err, vport->vport);
		return err;
	}

	esw_debug(esw->dev,
		  "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
		  vport->vport, vport->info.vlan, vport->info.qos);

	/* Allowed vlan rule */
	err = mlx5_esw_create_vport_egress_acl_vlan(esw, vport, vport->info.vlan,
						    MLX5_FLOW_CONTEXT_ACTION_ALLOW);
	if (err)
		return err;

	/* Drop others rule (star rule) */
	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;

	/* Attach egress drop flow counter */
	if (counter) {
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
		drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		drop_ctr_dst.counter_id = mlx5_fc_id(counter);
		dst = &drop_ctr_dst;
		dest_num++;
	}
	vport->egress.legacy.drop_rule =
		mlx5_add_flow_rules(vport->egress.acl, spec,
				    &flow_act, dst, dest_num);
	if (IS_ERR(vport->egress.legacy.drop_rule)) {
		err = PTR_ERR(vport->egress.legacy.drop_rule);
		esw_warn(esw->dev,
			 "vport[%d] configure egress drop rule failed, err(%d)\n",
			 vport->vport, err);
		vport->egress.legacy.drop_rule = NULL;
	}
out:
	kvfree(spec);
	return err;
}

static bool element_type_supported(struct mlx5_eswitch *esw, int type)
{
	const struct mlx5_core_dev *dev = esw->dev;

	switch (type) {
	case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
		return MLX5_CAP_QOS(dev, esw_element_type) &
		       ELEMENT_TYPE_CAP_MASK_TASR;
	case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
		return MLX5_CAP_QOS(dev, esw_element_type) &
		       ELEMENT_TYPE_CAP_MASK_VPORT;
	case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC:
		return MLX5_CAP_QOS(dev, esw_element_type) &
		       ELEMENT_TYPE_CAP_MASK_VPORT_TC;
	case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC:
		return MLX5_CAP_QOS(dev, esw_element_type) &
		       ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
	}
	return false;
}

1bd27b11 1491/* Vport QoS management */
610090eb 1492static void esw_create_tsar(struct mlx5_eswitch *esw)
1bd27b11
MHY
1493{
1494 u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
1495 struct mlx5_core_dev *dev = esw->dev;
6cedde45 1496 __be32 *attr;
1bd27b11
MHY
1497 int err;
1498
1499 if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
610090eb 1500 return;
1bd27b11 1501
6cedde45 1502 if (!element_type_supported(esw, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR))
610090eb 1503 return;
1bd27b11
MHY
1504
1505 if (esw->qos.enabled)
610090eb 1506 return;
1bd27b11 1507
6cedde45
EC
1508 MLX5_SET(scheduling_context, tsar_ctx, element_type,
1509 SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
1510
1511 attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
1512 *attr = cpu_to_be32(TSAR_ELEMENT_TSAR_TYPE_DWRR << 16);
1bd27b11
MHY
1513
1514 err = mlx5_create_scheduling_element_cmd(dev,
1515 SCHEDULING_HIERARCHY_E_SWITCH,
18a89ab7 1516 tsar_ctx,
1bd27b11
MHY
1517 &esw->qos.root_tsar_id);
1518 if (err) {
1519 esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err);
610090eb 1520 return;
1bd27b11
MHY
1521 }
1522
1523 esw->qos.enabled = true;
1bd27b11
MHY
1524}
1525
static void esw_destroy_tsar(struct mlx5_eswitch *esw)
{
	int err;

	if (!esw->qos.enabled)
		return;

	err = mlx5_destroy_scheduling_element_cmd(esw->dev,
						  SCHEDULING_HIERARCHY_E_SWITCH,
						  esw->qos.root_tsar_id);
	if (err)
		esw_warn(esw->dev, "E-Switch destroy TSAR failed (%d)\n", err);

	esw->qos.enabled = false;
}

ee813f31
PP
1542static int esw_vport_enable_qos(struct mlx5_eswitch *esw,
1543 struct mlx5_vport *vport,
c9497c98 1544 u32 initial_max_rate, u32 initial_bw_share)
1bd27b11
MHY
1545{
1546 u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
1bd27b11
MHY
1547 struct mlx5_core_dev *dev = esw->dev;
1548 void *vport_elem;
1549 int err = 0;
1550
1551 if (!esw->qos.enabled || !MLX5_CAP_GEN(dev, qos) ||
1552 !MLX5_CAP_QOS(dev, esw_scheduling))
1553 return 0;
1554
1555 if (vport->qos.enabled)
1556 return -EEXIST;
1557
18a89ab7 1558 MLX5_SET(scheduling_context, sched_ctx, element_type,
1bd27b11 1559 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
18a89ab7 1560 vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
1bd27b11 1561 element_attributes);
ee813f31 1562 MLX5_SET(vport_element, vport_elem, vport_number, vport->vport);
18a89ab7 1563 MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
1bd27b11 1564 esw->qos.root_tsar_id);
18a89ab7 1565 MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
1bd27b11 1566 initial_max_rate);
18a89ab7 1567 MLX5_SET(scheduling_context, sched_ctx, bw_share, initial_bw_share);
1bd27b11
MHY
1568
1569 err = mlx5_create_scheduling_element_cmd(dev,
1570 SCHEDULING_HIERARCHY_E_SWITCH,
18a89ab7 1571 sched_ctx,
1bd27b11
MHY
1572 &vport->qos.esw_tsar_ix);
1573 if (err) {
1574 esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n",
ee813f31 1575 vport->vport, err);
1bd27b11
MHY
1576 return err;
1577 }
1578
1579 vport->qos.enabled = true;
1580 return 0;
1581}

static void esw_vport_disable_qos(struct mlx5_eswitch *esw,
                                  struct mlx5_vport *vport)
{
        int err;

        if (!vport->qos.enabled)
                return;

        err = mlx5_destroy_scheduling_element_cmd(esw->dev,
                                                  SCHEDULING_HIERARCHY_E_SWITCH,
                                                  vport->qos.esw_tsar_ix);
        if (err)
                esw_warn(esw->dev, "E-Switch destroy TSAR vport element failed (vport=%d,err=%d)\n",
                         vport->vport, err);

        vport->qos.enabled = false;
}

static int esw_vport_qos_config(struct mlx5_eswitch *esw,
                                struct mlx5_vport *vport,
                                u32 max_rate, u32 bw_share)
{
        u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
        struct mlx5_core_dev *dev = esw->dev;
        void *vport_elem;
        u32 bitmask = 0;
        int err = 0;

        if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
                return -EOPNOTSUPP;

        if (!vport->qos.enabled)
                return -EIO;

        MLX5_SET(scheduling_context, sched_ctx, element_type,
                 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
        vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
                                  element_attributes);
        MLX5_SET(vport_element, vport_elem, vport_number, vport->vport);
        MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
                 esw->qos.root_tsar_id);
        MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
                 max_rate);
        MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
        bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
        bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE;

        err = mlx5_modify_scheduling_element_cmd(dev,
                                                 SCHEDULING_HIERARCHY_E_SWITCH,
                                                 sched_ctx,
                                                 vport->qos.esw_tsar_ix,
                                                 bitmask);
        if (err) {
                esw_warn(esw->dev, "E-Switch modify TSAR vport element failed (vport=%d,err=%d)\n",
                         vport->vport, err);
                return err;
        }

        return 0;
}
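
/*
 * Added note: the modify bitmask selects which scheduling-context fields
 * firmware applies. esw_vport_qos_config() rewrites both limits, so it
 * sets the MAX_AVERAGE_BW and BW_SHARE bits; mlx5_esw_modify_vport_rate()
 * below touches only the rate limit and passes just MAX_AVERAGE_BW.
 */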

int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
                               u32 rate_mbps)
{
        u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
        struct mlx5_vport *vport;

        vport = mlx5_eswitch_get_vport(esw, vport_num);
        /* mlx5_eswitch_get_vport() may return ERR_PTR; check added here
         * defensively before dereferencing.
         */
        if (IS_ERR(vport))
                return PTR_ERR(vport);

        MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps);

        return mlx5_modify_scheduling_element_cmd(esw->dev,
                                                  SCHEDULING_HIERARCHY_E_SWITCH,
                                                  ctx,
                                                  vport->qos.esw_tsar_ix,
                                                  MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW);
}

static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
{
        ((u8 *)node_guid)[7] = mac[0];
        ((u8 *)node_guid)[6] = mac[1];
        ((u8 *)node_guid)[5] = mac[2];
        ((u8 *)node_guid)[4] = 0xff;
        ((u8 *)node_guid)[3] = 0xfe;
        ((u8 *)node_guid)[2] = mac[3];
        ((u8 *)node_guid)[1] = mac[4];
        ((u8 *)node_guid)[0] = mac[5];
}
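
/*
 * Worked example (illustrative): MAC 00:11:22:33:44:55 yields the
 * EUI-64-style node GUID 00:11:22:ff:fe:33:44:55; the ff:fe pair is
 * inserted between the OUI and the NIC-specific bytes. The bytes are
 * stored into the u64 from index 7 down to 0, and, unlike strict
 * EUI-64, the universal/local bit is not flipped.
 */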

static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
                                 struct mlx5_vport *vport)
{
        u16 vport_num = vport->vport;
        int flags;

        if (mlx5_esw_is_manager_vport(esw, vport_num))
                return;

        mlx5_modify_vport_admin_state(esw->dev,
                                      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
                                      vport_num, 1,
                                      vport->info.link_state);

        /* Host PF has its own mac/guid. */
        if (vport_num) {
                mlx5_modify_nic_vport_mac_address(esw->dev, vport_num,
                                                  vport->info.mac);
                mlx5_modify_nic_vport_node_guid(esw->dev, vport_num,
                                                vport->info.node_guid);
        }

        flags = (vport->info.vlan || vport->info.qos) ?
                SET_VLAN_STRIP | SET_VLAN_INSERT : 0;
        modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, vport->info.qos,
                               flags);
}

static int esw_vport_create_legacy_acl_tables(struct mlx5_eswitch *esw,
                                              struct mlx5_vport *vport)
{
        int ret;

        /* Only non-manager vports need ACL in legacy mode */
        if (mlx5_esw_is_manager_vport(esw, vport->vport))
                return 0;

        if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
            MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) {
                vport->ingress.legacy.drop_counter = mlx5_fc_create(esw->dev, false);
                if (IS_ERR(vport->ingress.legacy.drop_counter)) {
                        esw_warn(esw->dev,
                                 "vport[%d] configure ingress drop rule counter failed\n",
                                 vport->vport);
                        vport->ingress.legacy.drop_counter = NULL;
                }
        }

        ret = esw_vport_ingress_config(esw, vport);
        if (ret)
                goto ingress_err;

        if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
            MLX5_CAP_ESW_EGRESS_ACL(esw->dev, flow_counter)) {
                vport->egress.legacy.drop_counter = mlx5_fc_create(esw->dev, false);
                if (IS_ERR(vport->egress.legacy.drop_counter)) {
                        esw_warn(esw->dev,
                                 "vport[%d] configure egress drop rule counter failed\n",
                                 vport->vport);
                        vport->egress.legacy.drop_counter = NULL;
                }
        }

        ret = esw_vport_egress_config(esw, vport);
        if (ret)
                goto egress_err;

        return 0;

egress_err:
        esw_vport_disable_legacy_ingress_acl(esw, vport);
        mlx5_fc_destroy(esw->dev, vport->egress.legacy.drop_counter);
        vport->egress.legacy.drop_counter = NULL;

ingress_err:
        mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter);
        vport->ingress.legacy.drop_counter = NULL;
        return ret;
}

static int esw_vport_setup_acl(struct mlx5_eswitch *esw,
                               struct mlx5_vport *vport)
{
        if (esw->mode == MLX5_ESWITCH_LEGACY)
                return esw_vport_create_legacy_acl_tables(esw, vport);
        else
                return esw_vport_create_offloads_acl_tables(esw, vport);
}

static void esw_vport_destroy_legacy_acl_tables(struct mlx5_eswitch *esw,
                                                struct mlx5_vport *vport)
{
        if (mlx5_esw_is_manager_vport(esw, vport->vport))
                return;

        esw_vport_disable_egress_acl(esw, vport);
        mlx5_fc_destroy(esw->dev, vport->egress.legacy.drop_counter);
        vport->egress.legacy.drop_counter = NULL;

        esw_vport_disable_legacy_ingress_acl(esw, vport);
        mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter);
        vport->ingress.legacy.drop_counter = NULL;
}

static void esw_vport_cleanup_acl(struct mlx5_eswitch *esw,
                                  struct mlx5_vport *vport)
{
        if (esw->mode == MLX5_ESWITCH_LEGACY)
                esw_vport_destroy_legacy_acl_tables(esw, vport);
        else
                esw_vport_destroy_offloads_acl_tables(esw, vport);
}

static int esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
                            enum mlx5_eswitch_vport_event enabled_events)
{
        u16 vport_num = vport->vport;
        int ret;

        mutex_lock(&esw->state_lock);
        WARN_ON(vport->enabled);

        esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);

        /* Restore old vport configuration */
        esw_apply_vport_conf(esw, vport);

        ret = esw_vport_setup_acl(esw, vport);
        if (ret)
                goto done;

        /* Attach vport to the eswitch rate limiter */
        if (esw_vport_enable_qos(esw, vport, vport->info.max_rate,
                                 vport->qos.bw_share))
                esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter", vport_num);

        /* Sync with current vport context */
        vport->enabled_events = enabled_events;
        vport->enabled = true;

        /* The eswitch manager is trusted by default. On a smartNIC the host
         * PF (vport 0) is trusted as well, since it is a vport group manager.
         */
        if (mlx5_esw_is_manager_vport(esw, vport_num) ||
            (!vport_num && mlx5_core_is_ecpf(esw->dev)))
                vport->info.trusted = true;

        esw_vport_change_handle_locked(vport);

        esw->enabled_vports++;
        esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
done:
        mutex_unlock(&esw->state_lock);
        return ret;
}

static void esw_disable_vport(struct mlx5_eswitch *esw,
                              struct mlx5_vport *vport)
{
        u16 vport_num = vport->vport;

        mutex_lock(&esw->state_lock);
        if (!vport->enabled)
                goto done;

        esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
        /* Mark this vport as disabled to discard new events */
        vport->enabled = false;

        /* Disable events from this vport */
        arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
        /* We don't assume VFs will clean up after themselves.
         * Calling the vport change handler while the vport is disabled will
         * clean up the vport resources.
         */
        esw_vport_change_handle_locked(vport);
        vport->enabled_events = 0;
        esw_vport_disable_qos(esw, vport);

        if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
            esw->mode == MLX5_ESWITCH_LEGACY)
                mlx5_modify_vport_admin_state(esw->dev,
                                              MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
                                              vport_num, 1,
                                              MLX5_VPORT_ADMIN_STATE_DOWN);

        esw_vport_cleanup_acl(esw, vport);
        esw->enabled_vports--;

done:
        mutex_unlock(&esw->state_lock);
}

static int eswitch_vport_event(struct notifier_block *nb,
                               unsigned long type, void *data)
{
        struct mlx5_eswitch *esw = mlx5_nb_cof(nb, struct mlx5_eswitch, nb);
        struct mlx5_eqe *eqe = data;
        struct mlx5_vport *vport;
        u16 vport_num;

        vport_num = be16_to_cpu(eqe->data.vport_change.vport_num);
        vport = mlx5_eswitch_get_vport(esw, vport_num);
        if (!IS_ERR(vport))
                queue_work(esw->work_queue, &vport->vport_change_handler);
        return NOTIFY_OK;
}
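
/*
 * Added note on the event flow: a NIC_VPORT_CHANGE EQE carries the number
 * of the vport that changed; the notifier above looks the vport up and
 * queues its vport_change_handler work item, which re-syncs that vport's
 * context under esw->state_lock.
 */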

/**
 * mlx5_esw_query_functions - Returns raw output about functions state
 * @dev: Pointer to device to query
 *
 * mlx5_esw_query_functions() allocates and returns the raw output of a
 * QUERY_ESW_FUNCTIONS command on success; otherwise it returns an ERR_PTR.
 * The caller must free the returned buffer with kvfree() when a valid
 * pointer is returned.
 */
const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
        int outlen = MLX5_ST_SZ_BYTES(query_esw_functions_out);
        u32 in[MLX5_ST_SZ_DW(query_esw_functions_in)] = {};
        u32 *out;
        int err;

        out = kvzalloc(outlen, GFP_KERNEL);
        if (!out)
                return ERR_PTR(-ENOMEM);

        MLX5_SET(query_esw_functions_in, in, opcode,
                 MLX5_CMD_OP_QUERY_ESW_FUNCTIONS);

        err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
        if (!err)
                return out;

        kvfree(out);
        return ERR_PTR(err);
}
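
/*
 * Typical caller pattern (mirrors mlx5_eswitch_update_num_of_vfs() at the
 * end of this file):
 *
 *      const u32 *out = mlx5_esw_query_functions(dev);
 *
 *      if (IS_ERR(out))
 *              return;
 *      num_vfs = MLX5_GET(query_esw_functions_out, out,
 *                         host_params_context.host_num_of_vfs);
 *      kvfree(out);
 */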

static void mlx5_eswitch_event_handlers_register(struct mlx5_eswitch *esw)
{
        MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
        mlx5_eq_notifier_register(esw->dev, &esw->nb);

        if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) {
                MLX5_NB_INIT(&esw->esw_funcs.nb, mlx5_esw_funcs_changed_handler,
                             ESW_FUNCTIONS_CHANGED);
                mlx5_eq_notifier_register(esw->dev, &esw->esw_funcs.nb);
        }
}

static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw)
{
        if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev))
                mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb);

        mlx5_eq_notifier_unregister(esw->dev, &esw->nb);

        flush_workqueue(esw->work_queue);
}

static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
{
        struct mlx5_vport *vport;
        int i;

        mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
                memset(&vport->info, 0, sizeof(vport->info));
                vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
        }
}

/* Public E-Switch API */
#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))

/* mlx5_eswitch_enable_pf_vf_vports() enables vports of PF, ECPF and VFs,
 * whichever are present on the eswitch.
 */
int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
                                 enum mlx5_eswitch_vport_event enabled_events)
{
        struct mlx5_vport *vport;
        int num_vfs;
        int ret;
        int i;

        /* Enable PF vport */
        vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
        ret = esw_enable_vport(esw, vport, enabled_events);
        if (ret)
                return ret;

        /* Enable ECPF vport */
        if (mlx5_ecpf_vport_exists(esw->dev)) {
                vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
                ret = esw_enable_vport(esw, vport, enabled_events);
                if (ret)
                        goto ecpf_err;
        }

        /* Enable VF vports */
        mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
                ret = esw_enable_vport(esw, vport, enabled_events);
                if (ret)
                        goto vf_err;
        }
        return 0;

vf_err:
        num_vfs = i - 1;
        mlx5_esw_for_each_vf_vport_reverse(esw, i, vport, num_vfs)
                esw_disable_vport(esw, vport);

        if (mlx5_ecpf_vport_exists(esw->dev)) {
                vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
                esw_disable_vport(esw, vport);
        }

ecpf_err:
        vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
        esw_disable_vport(esw, vport);
        return ret;
}
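
/*
 * Added note: vports come up in PF -> ECPF -> VF order, and the error path
 * above unwinds in exact reverse (already-enabled VFs, then the ECPF, then
 * the PF), so a partial failure leaves no vport enabled.
 */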

/* mlx5_eswitch_disable_pf_vf_vports() disables vports of PF, ECPF and VFs,
 * whichever were previously enabled on the eswitch.
 */
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
{
        struct mlx5_vport *vport;
        int i;

        mlx5_esw_for_all_vports_reverse(esw, i, vport)
                esw_disable_vport(esw, vport);
}

int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
{
        int err;

        if (!ESW_ALLOWED(esw) ||
            !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
                esw_warn(esw->dev, "FDB is not supported, aborting ...\n");
                return -EOPNOTSUPP;
        }

        if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
                esw_warn(esw->dev, "ingress ACL is not supported by FW\n");

        if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
                esw_warn(esw->dev, "egress ACL is not supported by FW\n");

        esw_create_tsar(esw);

        esw->mode = mode;

        mlx5_lag_update(esw->dev);

        if (mode == MLX5_ESWITCH_LEGACY) {
                err = esw_legacy_enable(esw);
        } else {
                mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
                mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
                err = esw_offloads_enable(esw);
        }

        if (err)
                goto abort;

        mlx5_eswitch_event_handlers_register(esw);

        esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n",
                 mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
                 esw->esw_funcs.num_vfs, esw->enabled_vports);

        return 0;

abort:
        esw->mode = MLX5_ESWITCH_NONE;

        if (mode == MLX5_ESWITCH_OFFLOADS) {
                mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
                mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
        }

        return err;
}
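
/*
 * Added note: switching into or out of MLX5_ESWITCH_OFFLOADS reloads the
 * ETH and IB interfaces (see the reload calls above and in
 * mlx5_eswitch_disable() below) so that netdev and RDMA representors are
 * recreated against the new mode; the legacy path needs no reload.
 */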

void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf)
{
        int old_mode;

        if (!ESW_ALLOWED(esw) || esw->mode == MLX5_ESWITCH_NONE)
                return;

        esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n",
                 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
                 esw->esw_funcs.num_vfs, esw->enabled_vports);

        mlx5_eswitch_event_handlers_unregister(esw);

        if (esw->mode == MLX5_ESWITCH_LEGACY)
                esw_legacy_disable(esw);
        else if (esw->mode == MLX5_ESWITCH_OFFLOADS)
                esw_offloads_disable(esw);

        esw_destroy_tsar(esw);

        old_mode = esw->mode;
        esw->mode = MLX5_ESWITCH_NONE;

        mlx5_lag_update(esw->dev);

        if (old_mode == MLX5_ESWITCH_OFFLOADS) {
                mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
                mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
        }
        if (clear_vf)
                mlx5_eswitch_clear_vf_vports_info(esw);
}

int mlx5_eswitch_init(struct mlx5_core_dev *dev)
{
        struct mlx5_eswitch *esw;
        struct mlx5_vport *vport;
        int total_vports;
        int err, i;

        if (!MLX5_VPORT_MANAGER(dev))
                return 0;

        total_vports = mlx5_eswitch_get_total_vports(dev);

        esw_info(dev,
                 "Total vports %d, per vport: max uc(%d) max mc(%d)\n",
                 total_vports,
                 MLX5_MAX_UC_PER_VPORT(dev),
                 MLX5_MAX_MC_PER_VPORT(dev));

        esw = kzalloc(sizeof(*esw), GFP_KERNEL);
        if (!esw)
                return -ENOMEM;

        esw->dev = dev;
        esw->manager_vport = mlx5_eswitch_manager_vport(dev);
        esw->first_host_vport = mlx5_eswitch_first_host_vport_num(dev);

        esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
        if (!esw->work_queue) {
                err = -ENOMEM;
                goto abort;
        }

        esw->vports = kcalloc(total_vports, sizeof(struct mlx5_vport),
                              GFP_KERNEL);
        if (!esw->vports) {
                err = -ENOMEM;
                goto abort;
        }

        esw->total_vports = total_vports;

        err = esw_offloads_init_reps(esw);
        if (err)
                goto abort;

        mutex_init(&esw->offloads.encap_tbl_lock);
        hash_init(esw->offloads.encap_tbl);
        mutex_init(&esw->offloads.mod_hdr.lock);
        hash_init(esw->offloads.mod_hdr.hlist);
        atomic64_set(&esw->offloads.num_flows, 0);
        mutex_init(&esw->state_lock);

        mlx5_esw_for_all_vports(esw, i, vport) {
                vport->vport = mlx5_eswitch_index_to_vport_num(esw, i);
                vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
                vport->dev = dev;
                INIT_WORK(&vport->vport_change_handler,
                          esw_vport_change_handler);
        }

        esw->enabled_vports = 0;
        esw->mode = MLX5_ESWITCH_NONE;
        esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;

        dev->priv.eswitch = esw;
        return 0;
abort:
        if (esw->work_queue)
                destroy_workqueue(esw->work_queue);
        esw_offloads_cleanup_reps(esw);
        kfree(esw->vports);
        kfree(esw);
        return err;
}

void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
{
        if (!esw || !MLX5_VPORT_MANAGER(esw->dev))
                return;

        esw_info(esw->dev, "cleanup\n");

        esw->dev->priv.eswitch = NULL;
        destroy_workqueue(esw->work_queue);
        esw_offloads_cleanup_reps(esw);
        mutex_destroy(&esw->offloads.mod_hdr.lock);
        mutex_destroy(&esw->offloads.encap_tbl_lock);
        kfree(esw->vports);
        kfree(esw);
}

/* Vport Administration */
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
                               u16 vport, u8 mac[ETH_ALEN])
{
        struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
        u64 node_guid;
        int err = 0;

        if (IS_ERR(evport))
                return PTR_ERR(evport);
        if (is_multicast_ether_addr(mac))
                return -EINVAL;

        mutex_lock(&esw->state_lock);

        if (evport->info.spoofchk && !is_valid_ether_addr(mac))
                mlx5_core_warn(esw->dev,
                               "Set invalid MAC while spoofchk is on, vport(%d)\n",
                               vport);

        err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
        if (err) {
                mlx5_core_warn(esw->dev,
                               "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n",
                               vport, err);
                goto unlock;
        }

        node_guid_gen_from_mac(&node_guid, mac);
        err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid);
        if (err)
                mlx5_core_warn(esw->dev,
                               "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
                               vport, err);

        ether_addr_copy(evport->info.mac, mac);
        evport->info.node_guid = node_guid;
        if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
                err = esw_vport_ingress_config(esw, evport);

unlock:
        mutex_unlock(&esw->state_lock);
        return err;
}

int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
                                 u16 vport, int link_state)
{
        struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
        int err = 0;

        if (!ESW_ALLOWED(esw))
                return -EPERM;
        if (IS_ERR(evport))
                return PTR_ERR(evport);

        mutex_lock(&esw->state_lock);

        err = mlx5_modify_vport_admin_state(esw->dev,
                                            MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
                                            vport, 1, link_state);
        if (err) {
                mlx5_core_warn(esw->dev,
                               "Failed to set vport %d link state, err = %d",
                               vport, err);
                goto unlock;
        }

        evport->info.link_state = link_state;

unlock:
        mutex_unlock(&esw->state_lock);
        return err;
}

int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
                                  u16 vport, struct ifla_vf_info *ivi)
{
        struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);

        if (IS_ERR(evport))
                return PTR_ERR(evport);

        memset(ivi, 0, sizeof(*ivi));
        ivi->vf = vport - 1;

        mutex_lock(&esw->state_lock);
        ether_addr_copy(ivi->mac, evport->info.mac);
        ivi->linkstate = evport->info.link_state;
        ivi->vlan = evport->info.vlan;
        ivi->qos = evport->info.qos;
        ivi->spoofchk = evport->info.spoofchk;
        ivi->trusted = evport->info.trusted;
        ivi->min_tx_rate = evport->info.min_rate;
        ivi->max_tx_rate = evport->info.max_rate;
        mutex_unlock(&esw->state_lock);

        return 0;
}

int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
                                  u16 vport, u16 vlan, u8 qos, u8 set_flags)
{
        struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
        int err = 0;

        if (!ESW_ALLOWED(esw))
                return -EPERM;
        if (IS_ERR(evport))
                return PTR_ERR(evport);
        if (vlan > 4095 || qos > 7)
                return -EINVAL;

        err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
        if (err)
                return err;

        evport->info.vlan = vlan;
        evport->info.qos = qos;
        if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) {
                err = esw_vport_ingress_config(esw, evport);
                if (err)
                        return err;
                err = esw_vport_egress_config(esw, evport);
        }

        return err;
}

int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
                                u16 vport, u16 vlan, u8 qos)
{
        u8 set_flags = 0;
        int err;

        if (vlan || qos)
                set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;

        mutex_lock(&esw->state_lock);
        err = __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);
        mutex_unlock(&esw->state_lock);

        return err;
}
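
/*
 * Usage example (illustrative): mlx5_eswitch_set_vport_vlan(esw, 1, 100, 3)
 * pushes VLAN 100 with PCP 3 onto vport 1; because vlan/qos are nonzero,
 * both SET_VLAN_STRIP and SET_VLAN_INSERT are requested, while calling it
 * with vlan == 0 and qos == 0 clears the cvlan configuration.
 */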

int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
                                    u16 vport, bool spoofchk)
{
        struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
        bool pschk;
        int err = 0;

        if (!ESW_ALLOWED(esw))
                return -EPERM;
        if (IS_ERR(evport))
                return PTR_ERR(evport);

        mutex_lock(&esw->state_lock);
        pschk = evport->info.spoofchk;
        evport->info.spoofchk = spoofchk;
        if (pschk && !is_valid_ether_addr(evport->info.mac))
                mlx5_core_warn(esw->dev,
                               "Spoofchk set while MAC is invalid, vport(%d)\n",
                               evport->vport);
        if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
                err = esw_vport_ingress_config(esw, evport);
        if (err)
                evport->info.spoofchk = pschk;
        mutex_unlock(&esw->state_lock);

        return err;
}

static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw)
{
        if (esw->fdb_table.legacy.vepa_uplink_rule)
                mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_uplink_rule);

        if (esw->fdb_table.legacy.vepa_star_rule)
                mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_star_rule);

        esw->fdb_table.legacy.vepa_uplink_rule = NULL;
        esw->fdb_table.legacy.vepa_star_rule = NULL;
}

static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
                                         u8 setting)
{
        struct mlx5_flow_destination dest = {};
        struct mlx5_flow_act flow_act = {};
        struct mlx5_flow_handle *flow_rule;
        struct mlx5_flow_spec *spec;
        int err = 0;
        void *misc;

        if (!setting) {
                esw_cleanup_vepa_rules(esw);
                return 0;
        }

        if (esw->fdb_table.legacy.vepa_uplink_rule)
                return 0;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec)
                return -ENOMEM;

        /* Uplink rule: forward traffic arriving from the uplink to the FDB */
        misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
        MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK);

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

        spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
        dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest.ft = esw->fdb_table.legacy.fdb;
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec,
                                        &flow_act, &dest, 1);
        if (IS_ERR(flow_rule)) {
                err = PTR_ERR(flow_rule);
                goto out;
        } else {
                esw->fdb_table.legacy.vepa_uplink_rule = flow_rule;
        }

        /* Star rule: forward all other traffic to the uplink vport */
        memset(spec, 0, sizeof(*spec));
        memset(&dest, 0, sizeof(dest));
        dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
        dest.vport.num = MLX5_VPORT_UPLINK;
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec,
                                        &flow_act, &dest, 1);
        if (IS_ERR(flow_rule)) {
                err = PTR_ERR(flow_rule);
                goto out;
        } else {
                esw->fdb_table.legacy.vepa_star_rule = flow_rule;
        }

out:
        kvfree(spec);
        if (err)
                esw_cleanup_vepa_rules(esw);
        return err;
}
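
/*
 * Added note on the two VEPA rules: with VEPA enabled, VF-to-VF traffic is
 * no longer switched locally. The wildcard "star" rule sends everything to
 * the uplink (toward the adjacent bridge), while the more specific uplink
 * rule steers frames re-entering from the uplink into the regular legacy
 * FDB, so reflected frames can still reach their destination VF.
 */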

int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting)
{
        int err = 0;

        if (!esw)
                return -EOPNOTSUPP;

        if (!ESW_ALLOWED(esw))
                return -EPERM;

        mutex_lock(&esw->state_lock);
        if (esw->mode != MLX5_ESWITCH_LEGACY) {
                err = -EOPNOTSUPP;
                goto out;
        }

        err = _mlx5_eswitch_set_vepa_locked(esw, setting);

out:
        mutex_unlock(&esw->state_lock);
        return err;
}

int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
{
        if (!esw)
                return -EOPNOTSUPP;

        if (!ESW_ALLOWED(esw))
                return -EPERM;

        if (esw->mode != MLX5_ESWITCH_LEGACY)
                return -EOPNOTSUPP;

        *setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0;
        return 0;
}

int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
                                 u16 vport, bool setting)
{
        struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);

        if (!ESW_ALLOWED(esw))
                return -EPERM;
        if (IS_ERR(evport))
                return PTR_ERR(evport);

        mutex_lock(&esw->state_lock);
        evport->info.trusted = setting;
        if (evport->enabled)
                esw_vport_change_handle_locked(evport);
        mutex_unlock(&esw->state_lock);

        return 0;
}

static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
{
        u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
        struct mlx5_vport *evport;
        u32 max_guarantee = 0;
        int i;

        mlx5_esw_for_all_vports(esw, i, evport) {
                if (!evport->enabled || evport->info.min_rate < max_guarantee)
                        continue;
                max_guarantee = evport->info.min_rate;
        }

        return max_t(u32, max_guarantee / fw_max_bw_share, 1);
}

static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
{
        u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
        struct mlx5_vport *evport;
        u32 vport_max_rate;
        u32 vport_min_rate;
        u32 bw_share;
        int err;
        int i;

        mlx5_esw_for_all_vports(esw, i, evport) {
                if (!evport->enabled)
                        continue;
                vport_min_rate = evport->info.min_rate;
                vport_max_rate = evport->info.max_rate;
                bw_share = MLX5_MIN_BW_SHARE;

                if (vport_min_rate)
                        bw_share = MLX5_RATE_TO_BW_SHARE(vport_min_rate,
                                                         divider,
                                                         fw_max_bw_share);

                if (bw_share == evport->qos.bw_share)
                        continue;

                err = esw_vport_qos_config(esw, evport, vport_max_rate,
                                           bw_share);
                if (!err)
                        evport->qos.bw_share = bw_share;
                else
                        return err;
        }

        return 0;
}
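
/*
 * Worked example (illustrative, assuming MLX5_RATE_TO_BW_SHARE divides the
 * rate by the divider and clamps the result to at least MLX5_MIN_BW_SHARE):
 * with fw_max_bw_share = 100 and two enabled vports guaranteeing 10000 and
 * 5000, the divider is max(10000 / 100, 1) = 100, giving bw_share values of
 * 100 and 50. The largest guarantee always maps to the firmware maximum and
 * the others scale proportionally.
 */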

int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
                                u32 max_rate, u32 min_rate)
{
        struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
        u32 fw_max_bw_share;
        u32 previous_min_rate;
        u32 divider;
        bool min_rate_supported;
        bool max_rate_supported;
        int err = 0;

        if (!ESW_ALLOWED(esw))
                return -EPERM;
        if (IS_ERR(evport))
                return PTR_ERR(evport);

        fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
        min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
                                fw_max_bw_share >= MLX5_MIN_BW_SHARE;
        max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit);

        if ((min_rate && !min_rate_supported) || (max_rate && !max_rate_supported))
                return -EOPNOTSUPP;

        mutex_lock(&esw->state_lock);

        if (min_rate == evport->info.min_rate)
                goto set_max_rate;

        previous_min_rate = evport->info.min_rate;
        evport->info.min_rate = min_rate;
        divider = calculate_vports_min_rate_divider(esw);
        err = normalize_vports_min_rate(esw, divider);
        if (err) {
                evport->info.min_rate = previous_min_rate;
                goto unlock;
        }

set_max_rate:
        if (max_rate == evport->info.max_rate)
                goto unlock;

        err = esw_vport_qos_config(esw, evport, max_rate, evport->qos.bw_share);
        if (!err)
                evport->info.max_rate = max_rate;

unlock:
        mutex_unlock(&esw->state_lock);
        return err;
}

static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
                                               struct mlx5_vport *vport,
                                               struct mlx5_vport_drop_stats *stats)
{
        struct mlx5_eswitch *esw = dev->priv.eswitch;
        u64 rx_discard_vport_down, tx_discard_vport_down;
        u64 bytes = 0;
        int err = 0;

        if (!vport->enabled || esw->mode != MLX5_ESWITCH_LEGACY)
                return 0;

        if (vport->egress.legacy.drop_counter)
                mlx5_fc_query(dev, vport->egress.legacy.drop_counter,
                              &stats->rx_dropped, &bytes);

        if (vport->ingress.legacy.drop_counter)
                mlx5_fc_query(dev, vport->ingress.legacy.drop_counter,
                              &stats->tx_dropped, &bytes);

        if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) &&
            !MLX5_CAP_GEN(dev, transmit_discard_vport_down))
                return 0;

        err = mlx5_query_vport_down_stats(dev, vport->vport, 1,
                                          &rx_discard_vport_down,
                                          &tx_discard_vport_down);
        if (err)
                return err;

        if (MLX5_CAP_GEN(dev, receive_discard_vport_down))
                stats->rx_dropped += rx_discard_vport_down;
        if (MLX5_CAP_GEN(dev, transmit_discard_vport_down))
                stats->tx_dropped += tx_discard_vport_down;

        return 0;
}
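
/*
 * Added note: drop counters are reported from the VF's point of view, so
 * drops at the e-switch egress ACL (traffic heading to the VF) count as the
 * VF's rx_dropped, and ingress ACL drops (traffic from the VF) as its
 * tx_dropped; the vport-down discard counters are folded in the same way.
 */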

int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
                                 u16 vport_num,
                                 struct ifla_vf_stats *vf_stats)
{
        struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
        int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
        u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
        struct mlx5_vport_drop_stats stats = {0};
        int err = 0;
        u32 *out;

        if (IS_ERR(vport))
                return PTR_ERR(vport);

        out = kvzalloc(outlen, GFP_KERNEL);
        if (!out)
                return -ENOMEM;

        MLX5_SET(query_vport_counter_in, in, opcode,
                 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
        MLX5_SET(query_vport_counter_in, in, op_mod, 0);
        MLX5_SET(query_vport_counter_in, in, vport_number, vport->vport);
        MLX5_SET(query_vport_counter_in, in, other_vport, 1);

        err = mlx5_cmd_exec(esw->dev, in, sizeof(in), out, outlen);
        if (err)
                goto free_out;

        #define MLX5_GET_CTR(p, x) \
                MLX5_GET64(query_vport_counter_out, p, x)

        memset(vf_stats, 0, sizeof(*vf_stats));
        vf_stats->rx_packets =
                MLX5_GET_CTR(out, received_eth_unicast.packets) +
                MLX5_GET_CTR(out, received_ib_unicast.packets) +
                MLX5_GET_CTR(out, received_eth_multicast.packets) +
                MLX5_GET_CTR(out, received_ib_multicast.packets) +
                MLX5_GET_CTR(out, received_eth_broadcast.packets);

        vf_stats->rx_bytes =
                MLX5_GET_CTR(out, received_eth_unicast.octets) +
                MLX5_GET_CTR(out, received_ib_unicast.octets) +
                MLX5_GET_CTR(out, received_eth_multicast.octets) +
                MLX5_GET_CTR(out, received_ib_multicast.octets) +
                MLX5_GET_CTR(out, received_eth_broadcast.octets);

        vf_stats->tx_packets =
                MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
                MLX5_GET_CTR(out, transmitted_ib_unicast.packets) +
                MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
                MLX5_GET_CTR(out, transmitted_ib_multicast.packets) +
                MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);

        vf_stats->tx_bytes =
                MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
                MLX5_GET_CTR(out, transmitted_ib_unicast.octets) +
                MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
                MLX5_GET_CTR(out, transmitted_ib_multicast.octets) +
                MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);

        vf_stats->multicast =
                MLX5_GET_CTR(out, received_eth_multicast.packets) +
                MLX5_GET_CTR(out, received_ib_multicast.packets);

        vf_stats->broadcast =
                MLX5_GET_CTR(out, received_eth_broadcast.packets);

        err = mlx5_eswitch_query_vport_drop_stats(esw->dev, vport, &stats);
        if (err)
                goto free_out;
        vf_stats->rx_dropped = stats.rx_dropped;
        vf_stats->tx_dropped = stats.tx_dropped;

free_out:
        kvfree(out);
        return err;
}

u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw)
{
        return ESW_ALLOWED(esw) ? esw->mode : MLX5_ESWITCH_NONE;
}
EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);

enum devlink_eswitch_encap_mode
mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev)
{
        struct mlx5_eswitch *esw;

        esw = dev->priv.eswitch;
        return ESW_ALLOWED(esw) ? esw->offloads.encap :
                DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}
EXPORT_SYMBOL(mlx5_eswitch_get_encap_mode);

bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1)
{
        if ((dev0->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
             dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE) ||
            (dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS &&
             dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS))
                return true;

        return false;
}

bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
                               struct mlx5_core_dev *dev1)
{
        return (dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS &&
                dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS);
}

void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs)
{
        const u32 *out;

        WARN_ON_ONCE(esw->mode != MLX5_ESWITCH_NONE);

        if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) {
                esw->esw_funcs.num_vfs = num_vfs;
                return;
        }

        out = mlx5_esw_query_functions(esw->dev);
        if (IS_ERR(out))
                return;

        esw->esw_funcs.num_vfs = MLX5_GET(query_esw_functions_out, out,
                                          host_params_context.host_num_of_vfs);
        kvfree(out);
}