drivers/net/ethernet/mellanox/mlx5/core/eswitch.c (mirror_ubuntu-jammy-kernel.git)
1 /*
2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <linux/etherdevice.h>
34 #include <linux/mlx5/driver.h>
35 #include <linux/mlx5/mlx5_ifc.h>
36 #include <linux/mlx5/vport.h>
37 #include <linux/mlx5/fs.h>
38 #include "mlx5_core.h"
39 #include "lib/eq.h"
40 #include "eswitch.h"
41 #include "fs_core.h"
42 #include "devlink.h"
43 #include "ecpf.h"
44
45 enum {
46 MLX5_ACTION_NONE = 0,
47 MLX5_ACTION_ADD = 1,
48 MLX5_ACTION_DEL = 2,
49 };
50
51 /* Vport UC/MC hash node */
52 struct vport_addr {
53 struct l2addr_node node;
54 u8 action;
55 u16 vport;
56 struct mlx5_flow_handle *flow_rule;
57 bool mpfs; /* UC MAC was added to MPFS */
58 /* A flag indicating that mac was added due to mc promiscuous vport */
59 bool mc_promisc;
60 };
61
62 static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw);
63 static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw);
64
65 struct mlx5_vport *__must_check
66 mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
67 {
68 u16 idx;
69
70 if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
71 return ERR_PTR(-EPERM);
72
73 idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);
74
75 if (idx > esw->total_vports - 1) {
76 esw_debug(esw->dev, "vport out of range: num(0x%x), idx(0x%x)\n",
77 vport_num, idx);
78 return ERR_PTR(-EINVAL);
79 }
80
81 return &esw->vports[idx];
82 }
83
84 static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
85 u32 events_mask)
86 {
87 int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
88 int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
89 void *nic_vport_ctx;
90
91 MLX5_SET(modify_nic_vport_context_in, in,
92 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
93 MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
94 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
95 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
96 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
97 in, nic_vport_context);
98
99 MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);
100
101 if (events_mask & MLX5_VPORT_UC_ADDR_CHANGE)
102 MLX5_SET(nic_vport_context, nic_vport_ctx,
103 event_on_uc_address_change, 1);
104 if (events_mask & MLX5_VPORT_MC_ADDR_CHANGE)
105 MLX5_SET(nic_vport_context, nic_vport_ctx,
106 event_on_mc_address_change, 1);
107 if (events_mask & MLX5_VPORT_PROMISC_CHANGE)
108 MLX5_SET(nic_vport_context, nic_vport_ctx,
109 event_on_promisc_change, 1);
110
111 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
112 }
113
114 /* E-Switch vport context HW commands */
115 int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
116 bool other_vport,
117 void *in, int inlen)
118 {
119 u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};
120
121 MLX5_SET(modify_esw_vport_context_in, in, opcode,
122 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
123 MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
124 MLX5_SET(modify_esw_vport_context_in, in, other_vport, other_vport);
125 return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
126 }
127
128 int mlx5_eswitch_query_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
129 bool other_vport,
130 void *out, int outlen)
131 {
132 u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
133
134 MLX5_SET(query_esw_vport_context_in, in, opcode,
135 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
136 MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
137 MLX5_SET(modify_esw_vport_context_in, in, other_vport, other_vport);
138 return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
139 }
140
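/* Configure VST (port-based) VLAN stripping/insertion for a vport by updating
 * its e-switch vport context.
 */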
141 static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
142 u16 vlan, u8 qos, u8 set_flags)
143 {
144 u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0};
145
146 if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
147 !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
148 return -EOPNOTSUPP;
149
150 esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
151 vport, vlan, qos, set_flags);
152
153 if (set_flags & SET_VLAN_STRIP)
154 MLX5_SET(modify_esw_vport_context_in, in,
155 esw_vport_context.vport_cvlan_strip, 1);
156
157 if (set_flags & SET_VLAN_INSERT) {
158 /* insert only if no vlan in packet */
159 MLX5_SET(modify_esw_vport_context_in, in,
160 esw_vport_context.vport_cvlan_insert, 1);
161
162 MLX5_SET(modify_esw_vport_context_in, in,
163 esw_vport_context.cvlan_pcp, qos);
164 MLX5_SET(modify_esw_vport_context_in, in,
165 esw_vport_context.cvlan_id, vlan);
166 }
167
168 MLX5_SET(modify_esw_vport_context_in, in,
169 field_select.vport_cvlan_strip, 1);
170 MLX5_SET(modify_esw_vport_context_in, in,
171 field_select.vport_cvlan_insert, 1);
172
173 return mlx5_eswitch_modify_esw_vport_context(dev, vport, true,
174 in, sizeof(in));
175 }
176
177 /* E-Switch FDB */
178 static struct mlx5_flow_handle *
179 __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u16 vport, bool rx_rule,
180 u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
181 {
182 int match_header = (is_zero_ether_addr(mac_c) ? 0 :
183 MLX5_MATCH_OUTER_HEADERS);
184 struct mlx5_flow_handle *flow_rule = NULL;
185 struct mlx5_flow_act flow_act = {0};
186 struct mlx5_flow_destination dest = {};
187 struct mlx5_flow_spec *spec;
188 void *mv_misc = NULL;
189 void *mc_misc = NULL;
190 u8 *dmac_v = NULL;
191 u8 *dmac_c = NULL;
192
193 if (rx_rule)
194 match_header |= MLX5_MATCH_MISC_PARAMETERS;
195
196 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
197 if (!spec)
198 return NULL;
199
200 dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
201 outer_headers.dmac_47_16);
202 dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
203 outer_headers.dmac_47_16);
204
205 if (match_header & MLX5_MATCH_OUTER_HEADERS) {
206 ether_addr_copy(dmac_v, mac_v);
207 ether_addr_copy(dmac_c, mac_c);
208 }
209
210 if (match_header & MLX5_MATCH_MISC_PARAMETERS) {
211 mv_misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
212 misc_parameters);
213 mc_misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
214 misc_parameters);
215 MLX5_SET(fte_match_set_misc, mv_misc, source_port, MLX5_VPORT_UPLINK);
216 MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
217 }
218
219 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
220 dest.vport.num = vport;
221
222 esw_debug(esw->dev,
223 "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
224 dmac_v, dmac_c, vport);
225 spec->match_criteria_enable = match_header;
226 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
227 flow_rule =
228 mlx5_add_flow_rules(esw->fdb_table.legacy.fdb, spec,
229 &flow_act, &dest, 1);
230 if (IS_ERR(flow_rule)) {
231 esw_warn(esw->dev,
232 "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
233 dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
234 flow_rule = NULL;
235 }
236
237 kvfree(spec);
238 return flow_rule;
239 }
240
241 static struct mlx5_flow_handle *
242 esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u16 vport)
243 {
244 u8 mac_c[ETH_ALEN];
245
246 eth_broadcast_addr(mac_c);
247 return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac);
248 }
249
250 static struct mlx5_flow_handle *
251 esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u16 vport)
252 {
253 u8 mac_c[ETH_ALEN];
254 u8 mac_v[ETH_ALEN];
255
256 eth_zero_addr(mac_c);
257 eth_zero_addr(mac_v);
258 mac_c[0] = 0x01;
259 mac_v[0] = 0x01;
260 return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v);
261 }
262
263 static struct mlx5_flow_handle *
264 esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u16 vport)
265 {
266 u8 mac_c[ETH_ALEN];
267 u8 mac_v[ETH_ALEN];
268
269 eth_zero_addr(mac_c);
270 eth_zero_addr(mac_v);
271 return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
272 }
273
274 enum {
275 LEGACY_VEPA_PRIO = 0,
276 LEGACY_FDB_PRIO,
277 };
278
279 static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw)
280 {
281 struct mlx5_flow_table_attr ft_attr = {};
282 struct mlx5_core_dev *dev = esw->dev;
283 struct mlx5_flow_namespace *root_ns;
284 struct mlx5_flow_table *fdb;
285 int err;
286
287 root_ns = mlx5_get_fdb_sub_ns(dev, 0);
288 if (!root_ns) {
289 esw_warn(dev, "Failed to get FDB flow namespace\n");
290 return -EOPNOTSUPP;
291 }
292
293 /* num FTE 2, num FG 2 */
294 ft_attr.prio = LEGACY_VEPA_PRIO;
295 ft_attr.max_fte = 2;
296 ft_attr.autogroup.max_num_groups = 2;
297 fdb = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
298 if (IS_ERR(fdb)) {
299 err = PTR_ERR(fdb);
300 esw_warn(dev, "Failed to create VEPA FDB err %d\n", err);
301 return err;
302 }
303 esw->fdb_table.legacy.vepa_fdb = fdb;
304
305 return 0;
306 }
307
308 static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
309 {
310 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
311 struct mlx5_flow_table_attr ft_attr = {};
312 struct mlx5_core_dev *dev = esw->dev;
313 struct mlx5_flow_namespace *root_ns;
314 struct mlx5_flow_table *fdb;
315 struct mlx5_flow_group *g;
316 void *match_criteria;
317 int table_size;
318 u32 *flow_group_in;
319 u8 *dmac;
320 int err = 0;
321
322 esw_debug(dev, "Create FDB log_max_size(%d)\n",
323 MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
324
325 root_ns = mlx5_get_fdb_sub_ns(dev, 0);
326 if (!root_ns) {
327 esw_warn(dev, "Failed to get FDB flow namespace\n");
328 return -EOPNOTSUPP;
329 }
330
331 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
332 if (!flow_group_in)
333 return -ENOMEM;
334
335 table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
336 ft_attr.max_fte = table_size;
337 ft_attr.prio = LEGACY_FDB_PRIO;
338 fdb = mlx5_create_flow_table(root_ns, &ft_attr);
339 if (IS_ERR(fdb)) {
340 err = PTR_ERR(fdb);
341 esw_warn(dev, "Failed to create FDB Table err %d\n", err);
342 goto out;
343 }
344 esw->fdb_table.legacy.fdb = fdb;
345
346 /* Addresses group : Full match unicast/multicast addresses */
347 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
348 MLX5_MATCH_OUTER_HEADERS);
349 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
350 dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
351 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
352 /* Preserve 2 entries for allmulti and promisc rules */
353 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
354 eth_broadcast_addr(dmac);
355 g = mlx5_create_flow_group(fdb, flow_group_in);
356 if (IS_ERR(g)) {
357 err = PTR_ERR(g);
358 esw_warn(dev, "Failed to create flow group err(%d)\n", err);
359 goto out;
360 }
361 esw->fdb_table.legacy.addr_grp = g;
362
363 /* Allmulti group : One rule that forwards any mcast traffic */
364 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
365 MLX5_MATCH_OUTER_HEADERS);
366 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2);
367 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2);
368 eth_zero_addr(dmac);
369 dmac[0] = 0x01;
370 g = mlx5_create_flow_group(fdb, flow_group_in);
371 if (IS_ERR(g)) {
372 err = PTR_ERR(g);
373 esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
374 goto out;
375 }
376 esw->fdb_table.legacy.allmulti_grp = g;
377
378 /* Promiscuous group :
379 * One rule that forwards all unmatched traffic from previous groups
380 */
381 eth_zero_addr(dmac);
382 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
383 MLX5_MATCH_MISC_PARAMETERS);
384 MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
385 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
386 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
387 g = mlx5_create_flow_group(fdb, flow_group_in);
388 if (IS_ERR(g)) {
389 err = PTR_ERR(g);
390 esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
391 goto out;
392 }
393 esw->fdb_table.legacy.promisc_grp = g;
394
395 out:
396 if (err)
397 esw_destroy_legacy_fdb_table(esw);
398
399 kvfree(flow_group_in);
400 return err;
401 }
402
403 static void esw_destroy_legacy_vepa_table(struct mlx5_eswitch *esw)
404 {
405 esw_debug(esw->dev, "Destroy VEPA Table\n");
406 if (!esw->fdb_table.legacy.vepa_fdb)
407 return;
408
409 mlx5_destroy_flow_table(esw->fdb_table.legacy.vepa_fdb);
410 esw->fdb_table.legacy.vepa_fdb = NULL;
411 }
412
413 static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
414 {
415 esw_debug(esw->dev, "Destroy FDB Table\n");
416 if (!esw->fdb_table.legacy.fdb)
417 return;
418
419 if (esw->fdb_table.legacy.promisc_grp)
420 mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
421 if (esw->fdb_table.legacy.allmulti_grp)
422 mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
423 if (esw->fdb_table.legacy.addr_grp)
424 mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
425 mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb);
426
427 esw->fdb_table.legacy.fdb = NULL;
428 esw->fdb_table.legacy.addr_grp = NULL;
429 esw->fdb_table.legacy.allmulti_grp = NULL;
430 esw->fdb_table.legacy.promisc_grp = NULL;
431 }
432
433 static int esw_create_legacy_table(struct mlx5_eswitch *esw)
434 {
435 int err;
436
437 memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb));
438
439 err = esw_create_legacy_vepa_table(esw);
440 if (err)
441 return err;
442
443 err = esw_create_legacy_fdb_table(esw);
444 if (err)
445 esw_destroy_legacy_vepa_table(esw);
446
447 return err;
448 }
449
450 static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
451 {
452 esw_cleanup_vepa_rules(esw);
453 esw_destroy_legacy_fdb_table(esw);
454 esw_destroy_legacy_vepa_table(esw);
455 }
456
457 #define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \
458 MLX5_VPORT_MC_ADDR_CHANGE | \
459 MLX5_VPORT_PROMISC_CHANGE)
460
461 static int esw_legacy_enable(struct mlx5_eswitch *esw)
462 {
463 struct mlx5_vport *vport;
464 int ret, i;
465
466 ret = esw_create_legacy_table(esw);
467 if (ret)
468 return ret;
469
470 mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
471 vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
472
473 ret = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
474 if (ret)
475 esw_destroy_legacy_table(esw);
476 return ret;
477 }
478
479 static void esw_legacy_disable(struct mlx5_eswitch *esw)
480 {
481 struct esw_mc_addr *mc_promisc;
482
483 mlx5_eswitch_disable_pf_vf_vports(esw);
484
485 mc_promisc = &esw->mc_promisc;
486 if (mc_promisc->uplink_rule)
487 mlx5_del_flow_rules(mc_promisc->uplink_rule);
488
489 esw_destroy_legacy_table(esw);
490 }
491
492 /* E-Switch vport UC/MC lists management */
493 typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
494 struct vport_addr *vaddr);
495
496 static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
497 {
498 u8 *mac = vaddr->node.addr;
499 u16 vport = vaddr->vport;
500 int err;
501
502 /* Skip mlx5_mpfs_add_mac for eswitch managers,
503 * it is already done by its netdev in mlx5e_execute_l2_action
504 */
505 if (mlx5_esw_is_manager_vport(esw, vport))
506 goto fdb_add;
507
508 err = mlx5_mpfs_add_mac(esw->dev, mac);
509 if (err) {
510 esw_warn(esw->dev,
511 "Failed to add L2 table mac(%pM) for vport(0x%x), err(%d)\n",
512 mac, vport, err);
513 return err;
514 }
515 vaddr->mpfs = true;
516
517 fdb_add:
518 /* SRIOV is enabled: Forward UC MAC to vport */
519 if (esw->fdb_table.legacy.fdb && esw->mode == MLX5_ESWITCH_LEGACY)
520 vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
521
522 esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n",
523 vport, mac, vaddr->flow_rule);
524
525 return 0;
526 }
527
528 static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
529 {
530 u8 *mac = vaddr->node.addr;
531 u16 vport = vaddr->vport;
532 int err = 0;
533
534 /* Skip mlx5_mpfs_del_mac for eswitch managers,
535 * it is already done by its netdev in mlx5e_execute_l2_action
536 */
537 if (!vaddr->mpfs || mlx5_esw_is_manager_vport(esw, vport))
538 goto fdb_del;
539
540 err = mlx5_mpfs_del_mac(esw->dev, mac);
541 if (err)
542 esw_warn(esw->dev,
543 "Failed to del L2 table mac(%pM) for vport(%d), err(%d)\n",
544 mac, vport, err);
545 vaddr->mpfs = false;
546
547 fdb_del:
548 if (vaddr->flow_rule)
549 mlx5_del_flow_rules(vaddr->flow_rule);
550 vaddr->flow_rule = NULL;
551
552 return 0;
553 }
554
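/* Propagate a multicast address add/del to every vport that is currently
 * mc promiscuous (i.e. has an allmulti rule installed), except the vport
 * that originated the change.
 */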
555 static void update_allmulti_vports(struct mlx5_eswitch *esw,
556 struct vport_addr *vaddr,
557 struct esw_mc_addr *esw_mc)
558 {
559 u8 *mac = vaddr->node.addr;
560 struct mlx5_vport *vport;
561 u16 i, vport_num;
562
563 mlx5_esw_for_all_vports(esw, i, vport) {
564 struct hlist_head *vport_hash = vport->mc_list;
565 struct vport_addr *iter_vaddr =
566 l2addr_hash_find(vport_hash,
567 mac,
568 struct vport_addr);
569 vport_num = vport->vport;
570 if (IS_ERR_OR_NULL(vport->allmulti_rule) ||
571 vaddr->vport == vport_num)
572 continue;
573 switch (vaddr->action) {
574 case MLX5_ACTION_ADD:
575 if (iter_vaddr)
576 continue;
577 iter_vaddr = l2addr_hash_add(vport_hash, mac,
578 struct vport_addr,
579 GFP_KERNEL);
580 if (!iter_vaddr) {
581 esw_warn(esw->dev,
582 "ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n",
583 mac, vport_num);
584 continue;
585 }
586 iter_vaddr->vport = vport_num;
587 iter_vaddr->flow_rule =
588 esw_fdb_set_vport_rule(esw,
589 mac,
590 vport_num);
591 iter_vaddr->mc_promisc = true;
592 break;
593 case MLX5_ACTION_DEL:
594 if (!iter_vaddr)
595 continue;
596 mlx5_del_flow_rules(iter_vaddr->flow_rule);
597 l2addr_hash_del(iter_vaddr);
598 break;
599 }
600 }
601 }
602
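/* Add a multicast address for a vport: find or create the e-switch wide MC
 * entry, install the uplink forwarding rule on first use, and add a
 * per-vport FDB forwarding rule.
 */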
603 static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
604 {
605 struct hlist_head *hash = esw->mc_table;
606 struct esw_mc_addr *esw_mc;
607 u8 *mac = vaddr->node.addr;
608 u16 vport = vaddr->vport;
609
610 if (!esw->fdb_table.legacy.fdb)
611 return 0;
612
613 esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
614 if (esw_mc)
615 goto add;
616
617 esw_mc = l2addr_hash_add(hash, mac, struct esw_mc_addr, GFP_KERNEL);
618 if (!esw_mc)
619 return -ENOMEM;
620
621 esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
622 esw_fdb_set_vport_rule(esw, mac, MLX5_VPORT_UPLINK);
623
624 /* Add this multicast mac to all the mc promiscuous vports */
625 update_allmulti_vports(esw, vaddr, esw_mc);
626
627 add:
628 /* If the multicast mac is added as a result of mc promiscuous vport,
629 * don't increment the multicast ref count
630 */
631 if (!vaddr->mc_promisc)
632 esw_mc->refcnt++;
633
634 /* Forward MC MAC to vport */
635 vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
636 esw_debug(esw->dev,
637 "\tADDED MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
638 vport, mac, vaddr->flow_rule,
639 esw_mc->refcnt, esw_mc->uplink_rule);
640 return 0;
641 }
642
643 static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
644 {
645 struct hlist_head *hash = esw->mc_table;
646 struct esw_mc_addr *esw_mc;
647 u8 *mac = vaddr->node.addr;
648 u16 vport = vaddr->vport;
649
650 if (!esw->fdb_table.legacy.fdb)
651 return 0;
652
653 esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
654 if (!esw_mc) {
655 esw_warn(esw->dev,
656 "Failed to find eswitch MC addr for MAC(%pM) vport(%d)",
657 mac, vport);
658 return -EINVAL;
659 }
660 esw_debug(esw->dev,
661 "\tDELETE MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
662 vport, mac, vaddr->flow_rule, esw_mc->refcnt,
663 esw_mc->uplink_rule);
664
665 if (vaddr->flow_rule)
666 mlx5_del_flow_rules(vaddr->flow_rule);
667 vaddr->flow_rule = NULL;
668
669 /* If the multicast mac is added as a result of mc promiscuous vport,
670 * don't decrement the multicast ref count.
671 */
672 if (vaddr->mc_promisc || (--esw_mc->refcnt > 0))
673 return 0;
674
675 /* Remove this multicast mac from all the mc promiscuous vports */
676 update_allmulti_vports(esw, vaddr, esw_mc);
677
678 if (esw_mc->uplink_rule)
679 mlx5_del_flow_rules(esw_mc->uplink_rule);
680
681 l2addr_hash_del(esw_mc);
682 return 0;
683 }
684
685 /* Apply vport UC/MC list to HW l2 table and FDB table */
686 static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
687 struct mlx5_vport *vport, int list_type)
688 {
689 bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
690 vport_addr_action vport_addr_add;
691 vport_addr_action vport_addr_del;
692 struct vport_addr *addr;
693 struct l2addr_node *node;
694 struct hlist_head *hash;
695 struct hlist_node *tmp;
696 int hi;
697
698 vport_addr_add = is_uc ? esw_add_uc_addr :
699 esw_add_mc_addr;
700 vport_addr_del = is_uc ? esw_del_uc_addr :
701 esw_del_mc_addr;
702
703 hash = is_uc ? vport->uc_list : vport->mc_list;
704 for_each_l2hash_node(node, tmp, hash, hi) {
705 addr = container_of(node, struct vport_addr, node);
706 switch (addr->action) {
707 case MLX5_ACTION_ADD:
708 vport_addr_add(esw, addr);
709 addr->action = MLX5_ACTION_NONE;
710 break;
711 case MLX5_ACTION_DEL:
712 vport_addr_del(esw, addr);
713 l2addr_hash_del(addr);
714 break;
715 }
716 }
717 }
718
719 /* Sync vport UC/MC list from vport context */
720 static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
721 struct mlx5_vport *vport, int list_type)
722 {
723 bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
724 u8 (*mac_list)[ETH_ALEN];
725 struct l2addr_node *node;
726 struct vport_addr *addr;
727 struct hlist_head *hash;
728 struct hlist_node *tmp;
729 int size;
730 int err;
731 int hi;
732 int i;
733
734 size = is_uc ? MLX5_MAX_UC_PER_VPORT(esw->dev) :
735 MLX5_MAX_MC_PER_VPORT(esw->dev);
736
737 mac_list = kcalloc(size, ETH_ALEN, GFP_KERNEL);
738 if (!mac_list)
739 return;
740
741 hash = is_uc ? vport->uc_list : vport->mc_list;
742
743 for_each_l2hash_node(node, tmp, hash, hi) {
744 addr = container_of(node, struct vport_addr, node);
745 addr->action = MLX5_ACTION_DEL;
746 }
747
748 if (!vport->enabled)
749 goto out;
750
751 err = mlx5_query_nic_vport_mac_list(esw->dev, vport->vport, list_type,
752 mac_list, &size);
753 if (err)
754 goto out;
755 esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
756 vport->vport, is_uc ? "UC" : "MC", size);
757
758 for (i = 0; i < size; i++) {
759 if (is_uc && !is_valid_ether_addr(mac_list[i]))
760 continue;
761
762 if (!is_uc && !is_multicast_ether_addr(mac_list[i]))
763 continue;
764
765 addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr);
766 if (addr) {
767 addr->action = MLX5_ACTION_NONE;
768 /* If this mac was previously added because of allmulti
769 * promiscuous rx mode, it is now treated as a regular
770 * vport mac.
771 */
772 if (addr->mc_promisc) {
773 struct esw_mc_addr *esw_mc =
774 l2addr_hash_find(esw->mc_table,
775 mac_list[i],
776 struct esw_mc_addr);
777 if (!esw_mc) {
778 esw_warn(esw->dev,
779 "Failed to MAC(%pM) in mcast DB\n",
780 mac_list[i]);
781 continue;
782 }
783 esw_mc->refcnt++;
784 addr->mc_promisc = false;
785 }
786 continue;
787 }
788
789 addr = l2addr_hash_add(hash, mac_list[i], struct vport_addr,
790 GFP_KERNEL);
791 if (!addr) {
792 esw_warn(esw->dev,
793 "Failed to add MAC(%pM) to vport[%d] DB\n",
794 mac_list[i], vport->vport);
795 continue;
796 }
797 addr->vport = vport->vport;
798 addr->action = MLX5_ACTION_ADD;
799 }
800 out:
801 kfree(mac_list);
802 }
803
804 /* Add every multicast MAC known to the e-switch to the MC list of an
805 * mc promiscuous vport. Must be called after esw_update_vport_addr_list.
806 */
807 static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw,
808 struct mlx5_vport *vport)
809 {
810 struct l2addr_node *node;
811 struct vport_addr *addr;
812 struct hlist_head *hash;
813 struct hlist_node *tmp;
814 int hi;
815
816 hash = vport->mc_list;
817
818 for_each_l2hash_node(node, tmp, esw->mc_table, hi) {
819 u8 *mac = node->addr;
820
821 addr = l2addr_hash_find(hash, mac, struct vport_addr);
822 if (addr) {
823 if (addr->action == MLX5_ACTION_DEL)
824 addr->action = MLX5_ACTION_NONE;
825 continue;
826 }
827 addr = l2addr_hash_add(hash, mac, struct vport_addr,
828 GFP_KERNEL);
829 if (!addr) {
830 esw_warn(esw->dev,
831 "Failed to add allmulti MAC(%pM) to vport[%d] DB\n",
832 mac, vport->vport);
833 continue;
834 }
835 addr->vport = vport->vport;
836 addr->action = MLX5_ACTION_ADD;
837 addr->mc_promisc = true;
838 }
839 }
840
841 /* Apply vport rx mode to HW FDB table */
842 static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw,
843 struct mlx5_vport *vport,
844 bool promisc, bool mc_promisc)
845 {
846 struct esw_mc_addr *allmulti_addr = &esw->mc_promisc;
847
848 if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc)
849 goto promisc;
850
851 if (mc_promisc) {
852 vport->allmulti_rule =
853 esw_fdb_set_vport_allmulti_rule(esw, vport->vport);
854 if (!allmulti_addr->uplink_rule)
855 allmulti_addr->uplink_rule =
856 esw_fdb_set_vport_allmulti_rule(esw,
857 MLX5_VPORT_UPLINK);
858 allmulti_addr->refcnt++;
859 } else if (vport->allmulti_rule) {
860 mlx5_del_flow_rules(vport->allmulti_rule);
861 vport->allmulti_rule = NULL;
862
863 if (--allmulti_addr->refcnt > 0)
864 goto promisc;
865
866 if (allmulti_addr->uplink_rule)
867 mlx5_del_flow_rules(allmulti_addr->uplink_rule);
868 allmulti_addr->uplink_rule = NULL;
869 }
870
871 promisc:
872 if (IS_ERR_OR_NULL(vport->promisc_rule) != promisc)
873 return;
874
875 if (promisc) {
876 vport->promisc_rule =
877 esw_fdb_set_vport_promisc_rule(esw, vport->vport);
878 } else if (vport->promisc_rule) {
879 mlx5_del_flow_rules(vport->promisc_rule);
880 vport->promisc_rule = NULL;
881 }
882 }
883
884 /* Sync vport rx mode from vport context */
885 static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw,
886 struct mlx5_vport *vport)
887 {
888 int promisc_all = 0;
889 int promisc_uc = 0;
890 int promisc_mc = 0;
891 int err;
892
893 err = mlx5_query_nic_vport_promisc(esw->dev,
894 vport->vport,
895 &promisc_uc,
896 &promisc_mc,
897 &promisc_all);
898 if (err)
899 return;
900 esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
901 vport->vport, promisc_all, promisc_mc);
902
903 if (!vport->info.trusted || !vport->enabled) {
904 promisc_uc = 0;
905 promisc_mc = 0;
906 promisc_all = 0;
907 }
908
909 esw_apply_vport_rx_mode(esw, vport, promisc_all,
910 (promisc_all || promisc_mc));
911 }
912
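/* Handle a vport context change event: re-sync the UC/MC address lists and
 * rx mode from the vport context, then re-arm the change events for enabled
 * vports. Must be called with esw->state_lock held.
 */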
913 static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
914 {
915 struct mlx5_core_dev *dev = vport->dev;
916 struct mlx5_eswitch *esw = dev->priv.eswitch;
917 u8 mac[ETH_ALEN];
918
919 mlx5_query_nic_vport_mac_address(dev, vport->vport, true, mac);
920 esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
921 vport->vport, mac);
922
923 if (vport->enabled_events & MLX5_VPORT_UC_ADDR_CHANGE) {
924 esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
925 esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
926 }
927
928 if (vport->enabled_events & MLX5_VPORT_MC_ADDR_CHANGE)
929 esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);
930
931 if (vport->enabled_events & MLX5_VPORT_PROMISC_CHANGE) {
932 esw_update_vport_rx_mode(esw, vport);
933 if (!IS_ERR_OR_NULL(vport->allmulti_rule))
934 esw_update_vport_mc_promisc(esw, vport);
935 }
936
937 if (vport->enabled_events & (MLX5_VPORT_PROMISC_CHANGE | MLX5_VPORT_MC_ADDR_CHANGE))
938 esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);
939
940 esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
941 if (vport->enabled)
942 arm_vport_context_events_cmd(dev, vport->vport,
943 vport->enabled_events);
944 }
945
946 static void esw_vport_change_handler(struct work_struct *work)
947 {
948 struct mlx5_vport *vport =
949 container_of(work, struct mlx5_vport, vport_change_handler);
950 struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
951
952 mutex_lock(&esw->state_lock);
953 esw_vport_change_handle_locked(vport);
954 mutex_unlock(&esw->state_lock);
955 }
956
957 int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
958 struct mlx5_vport *vport)
959 {
960 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
961 struct mlx5_flow_group *vlan_grp = NULL;
962 struct mlx5_flow_group *drop_grp = NULL;
963 struct mlx5_core_dev *dev = esw->dev;
964 struct mlx5_flow_namespace *root_ns;
965 struct mlx5_flow_table *acl;
966 void *match_criteria;
967 u32 *flow_group_in;
968 /* The egress acl table contains 2 rules:
969 * 1)Allow traffic with vlan_tag=vst_vlan_id
970 * 2)Drop all other traffic.
971 */
972 int table_size = 2;
973 int err = 0;
974
975 if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
976 return -EOPNOTSUPP;
977
978 if (!IS_ERR_OR_NULL(vport->egress.acl))
979 return 0;
980
981 esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
982 vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));
983
984 root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS,
985 mlx5_eswitch_vport_num_to_index(esw, vport->vport));
986 if (!root_ns) {
987 esw_warn(dev, "Failed to get E-Switch egress flow namespace for vport (%d)\n", vport->vport);
988 return -EOPNOTSUPP;
989 }
990
991 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
992 if (!flow_group_in)
993 return -ENOMEM;
994
995 acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
996 if (IS_ERR(acl)) {
997 err = PTR_ERR(acl);
998 esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
999 vport->vport, err);
1000 goto out;
1001 }
1002
1003 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1004 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
1005 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
1006 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
1007 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
1008 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
1009
1010 vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
1011 if (IS_ERR(vlan_grp)) {
1012 err = PTR_ERR(vlan_grp);
1013 esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
1014 vport->vport, err);
1015 goto out;
1016 }
1017
1018 memset(flow_group_in, 0, inlen);
1019 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
1020 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
1021 drop_grp = mlx5_create_flow_group(acl, flow_group_in);
1022 if (IS_ERR(drop_grp)) {
1023 err = PTR_ERR(drop_grp);
1024 esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
1025 vport->vport, err);
1026 goto out;
1027 }
1028
1029 vport->egress.acl = acl;
1030 vport->egress.drop_grp = drop_grp;
1031 vport->egress.allowed_vlans_grp = vlan_grp;
1032 out:
1033 kvfree(flow_group_in);
1034 if (err && !IS_ERR_OR_NULL(vlan_grp))
1035 mlx5_destroy_flow_group(vlan_grp);
1036 if (err && !IS_ERR_OR_NULL(acl))
1037 mlx5_destroy_flow_table(acl);
1038 return err;
1039 }
1040
1041 void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
1042 struct mlx5_vport *vport)
1043 {
1044 if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan)) {
1045 mlx5_del_flow_rules(vport->egress.allowed_vlan);
1046 vport->egress.allowed_vlan = NULL;
1047 }
1048
1049 if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_rule)) {
1050 mlx5_del_flow_rules(vport->egress.legacy.drop_rule);
1051 vport->egress.legacy.drop_rule = NULL;
1052 }
1053 }
1054
1055 void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
1056 struct mlx5_vport *vport)
1057 {
1058 if (IS_ERR_OR_NULL(vport->egress.acl))
1059 return;
1060
1061 esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport);
1062
1063 esw_vport_cleanup_egress_rules(esw, vport);
1064 mlx5_destroy_flow_group(vport->egress.allowed_vlans_grp);
1065 mlx5_destroy_flow_group(vport->egress.drop_grp);
1066 mlx5_destroy_flow_table(vport->egress.acl);
1067 vport->egress.allowed_vlans_grp = NULL;
1068 vport->egress.drop_grp = NULL;
1069 vport->egress.acl = NULL;
1070 }
1071
1072 static int
1073 esw_vport_create_legacy_ingress_acl_groups(struct mlx5_eswitch *esw,
1074 struct mlx5_vport *vport)
1075 {
1076 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1077 struct mlx5_core_dev *dev = esw->dev;
1078 struct mlx5_flow_group *g;
1079 void *match_criteria;
1080 u32 *flow_group_in;
1081 int err;
1082
1083 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
1084 if (!flow_group_in)
1085 return -ENOMEM;
1086
1087 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
1088
1089 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1090 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
1091 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
1092 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
1093 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
1094 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
1095
1096 g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
1097 if (IS_ERR(g)) {
1098 err = PTR_ERR(g);
1099 esw_warn(dev, "vport[%d] ingress create untagged spoofchk flow group, err(%d)\n",
1100 vport->vport, err);
1101 goto spoof_err;
1102 }
1103 vport->ingress.legacy.allow_untagged_spoofchk_grp = g;
1104
1105 memset(flow_group_in, 0, inlen);
1106 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1107 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
1108 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
1109 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
1110
1111 g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
1112 if (IS_ERR(g)) {
1113 err = PTR_ERR(g);
1114 esw_warn(dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
1115 vport->vport, err);
1116 goto untagged_err;
1117 }
1118 vport->ingress.legacy.allow_untagged_only_grp = g;
1119
1120 memset(flow_group_in, 0, inlen);
1121 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1122 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
1123 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
1124 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
1125 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);
1126
1127 g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
1128 if (IS_ERR(g)) {
1129 err = PTR_ERR(g);
1130 esw_warn(dev, "vport[%d] ingress create spoofchk flow group, err(%d)\n",
1131 vport->vport, err);
1132 goto allow_spoof_err;
1133 }
1134 vport->ingress.legacy.allow_spoofchk_only_grp = g;
1135
1136 memset(flow_group_in, 0, inlen);
1137 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
1138 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);
1139
1140 g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
1141 if (IS_ERR(g)) {
1142 err = PTR_ERR(g);
1143 esw_warn(dev, "vport[%d] ingress create drop flow group, err(%d)\n",
1144 vport->vport, err);
1145 goto drop_err;
1146 }
1147 vport->ingress.legacy.drop_grp = g;
1148 kvfree(flow_group_in);
1149 return 0;
1150
1151 drop_err:
1152 if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_spoofchk_only_grp)) {
1153 mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp);
1154 vport->ingress.legacy.allow_spoofchk_only_grp = NULL;
1155 }
1156 allow_spoof_err:
1157 if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_only_grp)) {
1158 mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp);
1159 vport->ingress.legacy.allow_untagged_only_grp = NULL;
1160 }
1161 untagged_err:
1162 if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_spoofchk_grp)) {
1163 mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp);
1164 vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL;
1165 }
1166 spoof_err:
1167 kvfree(flow_group_in);
1168 return err;
1169 }
1170
1171 int esw_vport_create_ingress_acl_table(struct mlx5_eswitch *esw,
1172 struct mlx5_vport *vport, int table_size)
1173 {
1174 struct mlx5_core_dev *dev = esw->dev;
1175 struct mlx5_flow_namespace *root_ns;
1176 struct mlx5_flow_table *acl;
1177 int vport_index;
1178 int err;
1179
1180 if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
1181 return -EOPNOTSUPP;
1182
1183 esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
1184 vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
1185
1186 vport_index = mlx5_eswitch_vport_num_to_index(esw, vport->vport);
1187 root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
1188 vport_index);
1189 if (!root_ns) {
1190 esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n",
1191 vport->vport);
1192 return -EOPNOTSUPP;
1193 }
1194
1195 acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
1196 if (IS_ERR(acl)) {
1197 err = PTR_ERR(acl);
1198 esw_warn(dev, "vport[%d] ingress create flow Table, err(%d)\n",
1199 vport->vport, err);
1200 return err;
1201 }
1202 vport->ingress.acl = acl;
1203 return 0;
1204 }
1205
1206 void esw_vport_destroy_ingress_acl_table(struct mlx5_vport *vport)
1207 {
1208 if (!vport->ingress.acl)
1209 return;
1210
1211 mlx5_destroy_flow_table(vport->ingress.acl);
1212 vport->ingress.acl = NULL;
1213 }
1214
1215 void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
1216 struct mlx5_vport *vport)
1217 {
1218 if (vport->ingress.legacy.drop_rule) {
1219 mlx5_del_flow_rules(vport->ingress.legacy.drop_rule);
1220 vport->ingress.legacy.drop_rule = NULL;
1221 }
1222
1223 if (vport->ingress.allow_rule) {
1224 mlx5_del_flow_rules(vport->ingress.allow_rule);
1225 vport->ingress.allow_rule = NULL;
1226 }
1227 }
1228
1229 static void esw_vport_disable_legacy_ingress_acl(struct mlx5_eswitch *esw,
1230 struct mlx5_vport *vport)
1231 {
1232 if (!vport->ingress.acl)
1233 return;
1234
1235 esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);
1236
1237 esw_vport_cleanup_ingress_rules(esw, vport);
1238 if (vport->ingress.legacy.allow_spoofchk_only_grp) {
1239 mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp);
1240 vport->ingress.legacy.allow_spoofchk_only_grp = NULL;
1241 }
1242 if (vport->ingress.legacy.allow_untagged_only_grp) {
1243 mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp);
1244 vport->ingress.legacy.allow_untagged_only_grp = NULL;
1245 }
1246 if (vport->ingress.legacy.allow_untagged_spoofchk_grp) {
1247 mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp);
1248 vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL;
1249 }
1250 if (vport->ingress.legacy.drop_grp) {
1251 mlx5_destroy_flow_group(vport->ingress.legacy.drop_grp);
1252 vport->ingress.legacy.drop_grp = NULL;
1253 }
1254 esw_vport_destroy_ingress_acl_table(vport);
1255 }
1256
1257 static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
1258 struct mlx5_vport *vport)
1259 {
1260 struct mlx5_fc *counter = vport->ingress.legacy.drop_counter;
1261 struct mlx5_flow_destination drop_ctr_dst = {0};
1262 struct mlx5_flow_destination *dst = NULL;
1263 struct mlx5_flow_act flow_act = {0};
1264 struct mlx5_flow_spec *spec = NULL;
1265 int dest_num = 0;
1266 int err = 0;
1267 u8 *smac_v;
1268
1269 /* The ingress acl table contains 4 groups
1270 * (2 active rules at the same time -
1271 * 1 allow rule from one of the first 3 groups.
1272 * 1 drop rule from the last group):
1273 * 1)Allow untagged traffic with smac=original mac.
1274 * 2)Allow untagged traffic.
1275 * 3)Allow traffic with smac=original mac.
1276 * 4)Drop all other traffic.
1277 */
1278 int table_size = 4;
1279
1280 esw_vport_cleanup_ingress_rules(esw, vport);
1281
1282 if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
1283 esw_vport_disable_legacy_ingress_acl(esw, vport);
1284 return 0;
1285 }
1286
1287 if (!vport->ingress.acl) {
1288 err = esw_vport_create_ingress_acl_table(esw, vport, table_size);
1289 if (err) {
1290 esw_warn(esw->dev,
1291 "vport[%d] enable ingress acl err (%d)\n",
1292 vport->vport, err);
1293 return err;
1294 }
1295
1296 err = esw_vport_create_legacy_ingress_acl_groups(esw, vport);
1297 if (err)
1298 goto out;
1299 }
1300
1301 esw_debug(esw->dev,
1302 "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
1303 vport->vport, vport->info.vlan, vport->info.qos);
1304
1305 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1306 if (!spec) {
1307 err = -ENOMEM;
1308 goto out;
1309 }
1310
1311 if (vport->info.vlan || vport->info.qos)
1312 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
1313
1314 if (vport->info.spoofchk) {
1315 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
1316 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0);
1317 smac_v = MLX5_ADDR_OF(fte_match_param,
1318 spec->match_value,
1319 outer_headers.smac_47_16);
1320 ether_addr_copy(smac_v, vport->info.mac);
1321 }
1322
1323 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1324 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
1325 vport->ingress.allow_rule =
1326 mlx5_add_flow_rules(vport->ingress.acl, spec,
1327 &flow_act, NULL, 0);
1328 if (IS_ERR(vport->ingress.allow_rule)) {
1329 err = PTR_ERR(vport->ingress.allow_rule);
1330 esw_warn(esw->dev,
1331 "vport[%d] configure ingress allow rule, err(%d)\n",
1332 vport->vport, err);
1333 vport->ingress.allow_rule = NULL;
1334 goto out;
1335 }
1336
1337 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
1338
1339 /* Attach drop flow counter */
1340 if (counter) {
1341 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
1342 drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1343 drop_ctr_dst.counter_id = mlx5_fc_id(counter);
1344 dst = &drop_ctr_dst;
1345 dest_num++;
1346 }
1347 vport->ingress.legacy.drop_rule =
1348 mlx5_add_flow_rules(vport->ingress.acl, NULL,
1349 &flow_act, dst, dest_num);
1350 if (IS_ERR(vport->ingress.legacy.drop_rule)) {
1351 err = PTR_ERR(vport->ingress.legacy.drop_rule);
1352 esw_warn(esw->dev,
1353 "vport[%d] configure ingress drop rule, err(%d)\n",
1354 vport->vport, err);
1355 vport->ingress.legacy.drop_rule = NULL;
1356 goto out;
1357 }
1358 kvfree(spec);
1359 return 0;
1360
1361 out:
1362 esw_vport_disable_legacy_ingress_acl(esw, vport);
1363 kvfree(spec);
1364 return err;
1365 }
1366
1367 int mlx5_esw_create_vport_egress_acl_vlan(struct mlx5_eswitch *esw,
1368 struct mlx5_vport *vport,
1369 u16 vlan_id, u32 flow_action)
1370 {
1371 struct mlx5_flow_act flow_act = {};
1372 struct mlx5_flow_spec *spec;
1373 int err = 0;
1374
1375 if (vport->egress.allowed_vlan)
1376 return -EEXIST;
1377
1378 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1379 if (!spec)
1380 return -ENOMEM;
1381
1382 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
1383 MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
1384 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
1385 MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vlan_id);
1386
1387 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1388 flow_act.action = flow_action;
1389 vport->egress.allowed_vlan =
1390 mlx5_add_flow_rules(vport->egress.acl, spec,
1391 &flow_act, NULL, 0);
1392 if (IS_ERR(vport->egress.allowed_vlan)) {
1393 err = PTR_ERR(vport->egress.allowed_vlan);
1394 esw_warn(esw->dev,
1395 "vport[%d] configure egress vlan rule failed, err(%d)\n",
1396 vport->vport, err);
1397 vport->egress.allowed_vlan = NULL;
1398 }
1399
1400 kvfree(spec);
1401 return err;
1402 }
1403
1404 static int esw_vport_egress_config(struct mlx5_eswitch *esw,
1405 struct mlx5_vport *vport)
1406 {
1407 struct mlx5_fc *counter = vport->egress.legacy.drop_counter;
1408 struct mlx5_flow_destination drop_ctr_dst = {0};
1409 struct mlx5_flow_destination *dst = NULL;
1410 struct mlx5_flow_act flow_act = {0};
1411 int dest_num = 0;
1412 int err = 0;
1413
1414 esw_vport_cleanup_egress_rules(esw, vport);
1415
1416 if (!vport->info.vlan && !vport->info.qos) {
1417 esw_vport_disable_egress_acl(esw, vport);
1418 return 0;
1419 }
1420
1421 err = esw_vport_enable_egress_acl(esw, vport);
1422 if (err) {
1423 mlx5_core_warn(esw->dev,
1424 "failed to enable egress acl (%d) on vport[%d]\n",
1425 err, vport->vport);
1426 return err;
1427 }
1428
1429 esw_debug(esw->dev,
1430 "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
1431 vport->vport, vport->info.vlan, vport->info.qos);
1432
1433 /* Allowed vlan rule */
1434 err = mlx5_esw_create_vport_egress_acl_vlan(esw, vport, vport->info.vlan,
1435 MLX5_FLOW_CONTEXT_ACTION_ALLOW);
1436 if (err)
1437 return err;
1438
1439 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
1440
1441 /* Attach egress drop flow counter */
1442 if (counter) {
1443 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
1444 drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1445 drop_ctr_dst.counter_id = mlx5_fc_id(counter);
1446 dst = &drop_ctr_dst;
1447 dest_num++;
1448 }
1449 vport->egress.legacy.drop_rule =
1450 mlx5_add_flow_rules(vport->egress.acl, NULL,
1451 &flow_act, dst, dest_num);
1452 if (IS_ERR(vport->egress.legacy.drop_rule)) {
1453 err = PTR_ERR(vport->egress.legacy.drop_rule);
1454 esw_warn(esw->dev,
1455 "vport[%d] configure egress drop rule failed, err(%d)\n",
1456 vport->vport, err);
1457 vport->egress.legacy.drop_rule = NULL;
1458 }
1459
1460 return err;
1461 }
1462
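/* Check whether the device advertises QoS scheduling support for the given
 * scheduling context element type.
 */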
1463 static bool element_type_supported(struct mlx5_eswitch *esw, int type)
1464 {
1465 const struct mlx5_core_dev *dev = esw->dev;
1466
1467 switch (type) {
1468 case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
1469 return MLX5_CAP_QOS(dev, esw_element_type) &
1470 ELEMENT_TYPE_CAP_MASK_TASR;
1471 case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
1472 return MLX5_CAP_QOS(dev, esw_element_type) &
1473 ELEMENT_TYPE_CAP_MASK_VPORT;
1474 case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC:
1475 return MLX5_CAP_QOS(dev, esw_element_type) &
1476 ELEMENT_TYPE_CAP_MASK_VPORT_TC;
1477 case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC:
1478 return MLX5_CAP_QOS(dev, esw_element_type) &
1479 ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
1480 }
1481 return false;
1482 }
1483
1484 /* Vport QoS management */
1485 static void esw_create_tsar(struct mlx5_eswitch *esw)
1486 {
1487 u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
1488 struct mlx5_core_dev *dev = esw->dev;
1489 __be32 *attr;
1490 int err;
1491
1492 if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
1493 return;
1494
1495 if (!element_type_supported(esw, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR))
1496 return;
1497
1498 if (esw->qos.enabled)
1499 return;
1500
1501 MLX5_SET(scheduling_context, tsar_ctx, element_type,
1502 SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
1503
1504 attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
1505 *attr = cpu_to_be32(TSAR_ELEMENT_TSAR_TYPE_DWRR << 16);
1506
1507 err = mlx5_create_scheduling_element_cmd(dev,
1508 SCHEDULING_HIERARCHY_E_SWITCH,
1509 tsar_ctx,
1510 &esw->qos.root_tsar_id);
1511 if (err) {
1512 esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err);
1513 return;
1514 }
1515
1516 esw->qos.enabled = true;
1517 }
1518
1519 static void esw_destroy_tsar(struct mlx5_eswitch *esw)
1520 {
1521 int err;
1522
1523 if (!esw->qos.enabled)
1524 return;
1525
1526 err = mlx5_destroy_scheduling_element_cmd(esw->dev,
1527 SCHEDULING_HIERARCHY_E_SWITCH,
1528 esw->qos.root_tsar_id);
1529 if (err)
1530 esw_warn(esw->dev, "E-Switch destroy TSAR failed (%d)\n", err);
1531
1532 esw->qos.enabled = false;
1533 }
1534
1535 static int esw_vport_enable_qos(struct mlx5_eswitch *esw,
1536 struct mlx5_vport *vport,
1537 u32 initial_max_rate, u32 initial_bw_share)
1538 {
1539 u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
1540 struct mlx5_core_dev *dev = esw->dev;
1541 void *vport_elem;
1542 int err = 0;
1543
1544 if (!esw->qos.enabled || !MLX5_CAP_GEN(dev, qos) ||
1545 !MLX5_CAP_QOS(dev, esw_scheduling))
1546 return 0;
1547
1548 if (vport->qos.enabled)
1549 return -EEXIST;
1550
1551 MLX5_SET(scheduling_context, sched_ctx, element_type,
1552 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
1553 vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
1554 element_attributes);
1555 MLX5_SET(vport_element, vport_elem, vport_number, vport->vport);
1556 MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
1557 esw->qos.root_tsar_id);
1558 MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
1559 initial_max_rate);
1560 MLX5_SET(scheduling_context, sched_ctx, bw_share, initial_bw_share);
1561
1562 err = mlx5_create_scheduling_element_cmd(dev,
1563 SCHEDULING_HIERARCHY_E_SWITCH,
1564 sched_ctx,
1565 &vport->qos.esw_tsar_ix);
1566 if (err) {
1567 esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n",
1568 vport->vport, err);
1569 return err;
1570 }
1571
1572 vport->qos.enabled = true;
1573 return 0;
1574 }
1575
1576 static void esw_vport_disable_qos(struct mlx5_eswitch *esw,
1577 struct mlx5_vport *vport)
1578 {
1579 int err;
1580
1581 if (!vport->qos.enabled)
1582 return;
1583
1584 err = mlx5_destroy_scheduling_element_cmd(esw->dev,
1585 SCHEDULING_HIERARCHY_E_SWITCH,
1586 vport->qos.esw_tsar_ix);
1587 if (err)
1588 esw_warn(esw->dev, "E-Switch destroy TSAR vport element failed (vport=%d,err=%d)\n",
1589 vport->vport, err);
1590
1591 vport->qos.enabled = false;
1592 }
1593
1594 static int esw_vport_qos_config(struct mlx5_eswitch *esw,
1595 struct mlx5_vport *vport,
1596 u32 max_rate, u32 bw_share)
1597 {
1598 u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
1599 struct mlx5_core_dev *dev = esw->dev;
1600 void *vport_elem;
1601 u32 bitmask = 0;
1602 int err = 0;
1603
1604 if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
1605 return -EOPNOTSUPP;
1606
1607 if (!vport->qos.enabled)
1608 return -EIO;
1609
1610 MLX5_SET(scheduling_context, sched_ctx, element_type,
1611 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
1612 vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
1613 element_attributes);
1614 MLX5_SET(vport_element, vport_elem, vport_number, vport->vport);
1615 MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
1616 esw->qos.root_tsar_id);
1617 MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
1618 max_rate);
1619 MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
1620 bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
1621 bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE;
1622
1623 err = mlx5_modify_scheduling_element_cmd(dev,
1624 SCHEDULING_HIERARCHY_E_SWITCH,
1625 sched_ctx,
1626 vport->qos.esw_tsar_ix,
1627 bitmask);
1628 if (err) {
1629 esw_warn(esw->dev, "E-Switch modify TSAR vport element failed (vport=%d,err=%d)\n",
1630 vport->vport, err);
1631 return err;
1632 }
1633
1634 return 0;
1635 }
1636
1637 int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
1638 u32 rate_mbps)
1639 {
1640 u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
1641 struct mlx5_vport *vport;
1642
1643 vport = mlx5_eswitch_get_vport(esw, vport_num);
1644 MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps);
1645
1646 return mlx5_modify_scheduling_element_cmd(esw->dev,
1647 SCHEDULING_HIERARCHY_E_SWITCH,
1648 ctx,
1649 vport->qos.esw_tsar_ix,
1650 MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW);
1651 }
1652
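/* Derive an EUI-64 style node GUID from the vport MAC address (MAC OUI,
 * 0xff, 0xfe, then the remaining MAC bytes).
 */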
1653 static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
1654 {
1655 ((u8 *)node_guid)[7] = mac[0];
1656 ((u8 *)node_guid)[6] = mac[1];
1657 ((u8 *)node_guid)[5] = mac[2];
1658 ((u8 *)node_guid)[4] = 0xff;
1659 ((u8 *)node_guid)[3] = 0xfe;
1660 ((u8 *)node_guid)[2] = mac[3];
1661 ((u8 *)node_guid)[1] = mac[4];
1662 ((u8 *)node_guid)[0] = mac[5];
1663 }
1664
1665 static int esw_vport_create_legacy_acl_tables(struct mlx5_eswitch *esw,
1666 struct mlx5_vport *vport)
1667 {
1668 int ret;
1669
1670 /* Only non manager vports need ACL in legacy mode */
1671 if (mlx5_esw_is_manager_vport(esw, vport->vport))
1672 return 0;
1673
1674 if (MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) {
1675 vport->ingress.legacy.drop_counter = mlx5_fc_create(esw->dev, false);
1676 if (IS_ERR(vport->ingress.legacy.drop_counter)) {
1677 esw_warn(esw->dev,
1678 "vport[%d] configure ingress drop rule counter failed\n",
1679 vport->vport);
1680 vport->ingress.legacy.drop_counter = NULL;
1681 }
1682 }
1683
1684 ret = esw_vport_ingress_config(esw, vport);
1685 if (ret)
1686 goto ingress_err;
1687
1688 if (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, flow_counter)) {
1689 vport->egress.legacy.drop_counter = mlx5_fc_create(esw->dev, false);
1690 if (IS_ERR(vport->egress.legacy.drop_counter)) {
1691 esw_warn(esw->dev,
1692 "vport[%d] configure egress drop rule counter failed\n",
1693 vport->vport);
1694 vport->egress.legacy.drop_counter = NULL;
1695 }
1696 }
1697
1698 ret = esw_vport_egress_config(esw, vport);
1699 if (ret)
1700 goto egress_err;
1701
1702 return 0;
1703
1704 egress_err:
1705 esw_vport_disable_legacy_ingress_acl(esw, vport);
1706 mlx5_fc_destroy(esw->dev, vport->egress.legacy.drop_counter);
1707 vport->egress.legacy.drop_counter = NULL;
1708
1709 ingress_err:
1710 mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter);
1711 vport->ingress.legacy.drop_counter = NULL;
1712 return ret;
1713 }
1714
1715 static int esw_vport_setup_acl(struct mlx5_eswitch *esw,
1716 struct mlx5_vport *vport)
1717 {
1718 if (esw->mode == MLX5_ESWITCH_LEGACY)
1719 return esw_vport_create_legacy_acl_tables(esw, vport);
1720 else
1721 return esw_vport_create_offloads_acl_tables(esw, vport);
1722 }
1723
1724 static void esw_vport_destroy_legacy_acl_tables(struct mlx5_eswitch *esw,
1725 struct mlx5_vport *vport)
1726
1727 {
1728 if (mlx5_esw_is_manager_vport(esw, vport->vport))
1729 return;
1730
1731 esw_vport_disable_egress_acl(esw, vport);
1732 mlx5_fc_destroy(esw->dev, vport->egress.legacy.drop_counter);
1733 vport->egress.legacy.drop_counter = NULL;
1734
1735 esw_vport_disable_legacy_ingress_acl(esw, vport);
1736 mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter);
1737 vport->ingress.legacy.drop_counter = NULL;
1738 }
1739
1740 static void esw_vport_cleanup_acl(struct mlx5_eswitch *esw,
1741 struct mlx5_vport *vport)
1742 {
1743 if (esw->mode == MLX5_ESWITCH_LEGACY)
1744 esw_vport_destroy_legacy_acl_tables(esw, vport);
1745 else
1746 esw_vport_destroy_offloads_acl_tables(esw, vport);
1747 }
1748
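/* Restore the persistent vport configuration when a vport is enabled:
 * ACL tables, QoS rate limiting, admin link state, MAC/node GUID and
 * VST VLAN settings.
 */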
1749 static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
1750 {
1751 u16 vport_num = vport->vport;
1752 int flags;
1753 int err;
1754
1755 err = esw_vport_setup_acl(esw, vport);
1756 if (err)
1757 return err;
1758
1759 /* Attach vport to the eswitch rate limiter */
1760 esw_vport_enable_qos(esw, vport, vport->info.max_rate, vport->qos.bw_share);
1761
1762 if (mlx5_esw_is_manager_vport(esw, vport_num))
1763 return 0;
1764
1765 mlx5_modify_vport_admin_state(esw->dev,
1766 MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
1767 vport_num, 1,
1768 vport->info.link_state);
1769
1770 /* Host PF has its own mac/guid. */
1771 if (vport_num) {
1772 mlx5_modify_nic_vport_mac_address(esw->dev, vport_num,
1773 vport->info.mac);
1774 mlx5_modify_nic_vport_node_guid(esw->dev, vport_num,
1775 vport->info.node_guid);
1776 }
1777
1778 flags = (vport->info.vlan || vport->info.qos) ?
1779 SET_VLAN_STRIP | SET_VLAN_INSERT : 0;
1780 modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan,
1781 vport->info.qos, flags);
1782
1783 return 0;
1784 }
1785
1786 /* Don't clean up vport->info; it's needed to restore the vport configuration */
1787 static void esw_vport_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
1788 {
1789 u16 vport_num = vport->vport;
1790
1791 if (!mlx5_esw_is_manager_vport(esw, vport_num))
1792 mlx5_modify_vport_admin_state(esw->dev,
1793 MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
1794 vport_num, 1,
1795 MLX5_VPORT_ADMIN_STATE_DOWN);
1796
1797 esw_vport_disable_qos(esw, vport);
1798 esw_vport_cleanup_acl(esw, vport);
1799 }
1800
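/* Bring up a single vport under esw->state_lock: apply its configuration via
 * esw_vport_setup(), record which events it should report and run the change
 * handler once to sync with the current vport context.
 */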
1801 static int esw_enable_vport(struct mlx5_eswitch *esw, u16 vport_num,
1802 enum mlx5_eswitch_vport_event enabled_events)
1803 {
1804 struct mlx5_vport *vport;
1805 int ret;
1806
1807 vport = mlx5_eswitch_get_vport(esw, vport_num);
1808
1809 mutex_lock(&esw->state_lock);
1810 WARN_ON(vport->enabled);
1811
1812 esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
1813
1814 ret = esw_vport_setup(esw, vport);
1815 if (ret)
1816 goto done;
1817
1818 /* Sync with current vport context */
1819 vport->enabled_events = enabled_events;
1820 vport->enabled = true;
1821
1822 	/* The esw manager is trusted by default. The host PF (vport 0) is trusted
1823 	 * as well on a smartNIC, as it is a vport group manager.
1824 	 */
1825 if (mlx5_esw_is_manager_vport(esw, vport_num) ||
1826 (!vport_num && mlx5_core_is_ecpf(esw->dev)))
1827 vport->info.trusted = true;
1828
1829 esw_vport_change_handle_locked(vport);
1830
1831 esw->enabled_vports++;
1832 esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
1833 done:
1834 mutex_unlock(&esw->state_lock);
1835 return ret;
1836 }
1837
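/* Counterpart of esw_enable_vport(): mark the vport disabled so new events
 * are discarded, disarm its context events, let the change handler release
 * any remaining vport resources and undo esw_vport_setup().
 */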
1838 static void esw_disable_vport(struct mlx5_eswitch *esw, u16 vport_num)
1839 {
1840 struct mlx5_vport *vport;
1841
1842 vport = mlx5_eswitch_get_vport(esw, vport_num);
1843
1844 mutex_lock(&esw->state_lock);
1845 if (!vport->enabled)
1846 goto done;
1847
1848 esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
1849 /* Mark this vport as disabled to discard new events */
1850 vport->enabled = false;
1851
1852 /* Disable events from this vport */
1853 arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
1854 	/* We don't assume VFs will clean up after themselves.
1855 	 * Calling the vport change handler while the vport is disabled will
1856 	 * clean up the vport resources.
1857 	 */
1858 esw_vport_change_handle_locked(vport);
1859 vport->enabled_events = 0;
1860 esw_vport_cleanup(esw, vport);
1861 esw->enabled_vports--;
1862
1863 done:
1864 mutex_unlock(&esw->state_lock);
1865 }
1866
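/* NIC vport change EQE handler: resolve the vport from the event and defer
 * the actual processing to the eswitch work queue.
 */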
1867 static int eswitch_vport_event(struct notifier_block *nb,
1868 unsigned long type, void *data)
1869 {
1870 struct mlx5_eswitch *esw = mlx5_nb_cof(nb, struct mlx5_eswitch, nb);
1871 struct mlx5_eqe *eqe = data;
1872 struct mlx5_vport *vport;
1873 u16 vport_num;
1874
1875 vport_num = be16_to_cpu(eqe->data.vport_change.vport_num);
1876 vport = mlx5_eswitch_get_vport(esw, vport_num);
1877 if (!IS_ERR(vport))
1878 queue_work(esw->work_queue, &vport->vport_change_handler);
1879 return NOTIFY_OK;
1880 }
1881
1882 /**
1883 * mlx5_esw_query_functions - Returns raw output about functions state
1884 * @dev: Pointer to device to query
1885 *
1886  * mlx5_esw_query_functions() allocates and returns the raw output of the
1887  * QUERY_ESW_FUNCTIONS command on success; otherwise it returns an ERR_PTR.
1888  * The caller must free the memory using kvfree() when a valid pointer is returned.
1889 */
1890 const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
1891 {
1892 int outlen = MLX5_ST_SZ_BYTES(query_esw_functions_out);
1893 u32 in[MLX5_ST_SZ_DW(query_esw_functions_in)] = {};
1894 u32 *out;
1895 int err;
1896
1897 out = kvzalloc(outlen, GFP_KERNEL);
1898 if (!out)
1899 return ERR_PTR(-ENOMEM);
1900
1901 MLX5_SET(query_esw_functions_in, in, opcode,
1902 MLX5_CMD_OP_QUERY_ESW_FUNCTIONS);
1903
1904 err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
1905 if (!err)
1906 return out;
1907
1908 kvfree(out);
1909 return ERR_PTR(err);
1910 }
1911
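/* Register for NIC vport change events and, when the driver is the
 * ESW_FUNCTIONS_CHANGED handler in offloads mode, for function change events.
 */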
1912 static void mlx5_eswitch_event_handlers_register(struct mlx5_eswitch *esw)
1913 {
1914 MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
1915 mlx5_eq_notifier_register(esw->dev, &esw->nb);
1916
1917 if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) {
1918 MLX5_NB_INIT(&esw->esw_funcs.nb, mlx5_esw_funcs_changed_handler,
1919 ESW_FUNCTIONS_CHANGED);
1920 mlx5_eq_notifier_register(esw->dev, &esw->esw_funcs.nb);
1921 }
1922 }
1923
1924 static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw)
1925 {
1926 if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev))
1927 mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb);
1928
1929 mlx5_eq_notifier_unregister(esw->dev, &esw->nb);
1930
1931 flush_workqueue(esw->work_queue);
1932 }
1933
1934 static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
1935 {
1936 struct mlx5_vport *vport;
1937 int i;
1938
1939 mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
1940 memset(&vport->info, 0, sizeof(vport->info));
1941 vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
1942 }
1943 }
1944
1945 /* Public E-Switch API */
1946 #define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
1947
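/* Enable a vport and load its representor. If loading the representor fails,
 * the vport is disabled again so the operation is all-or-nothing.
 */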
1948 int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
1949 enum mlx5_eswitch_vport_event enabled_events)
1950 {
1951 int err;
1952
1953 err = esw_enable_vport(esw, vport_num, enabled_events);
1954 if (err)
1955 return err;
1956
1957 err = esw_offloads_load_rep(esw, vport_num);
1958 if (err)
1959 goto err_rep;
1960
1961 return err;
1962
1963 err_rep:
1964 esw_disable_vport(esw, vport_num);
1965 return err;
1966 }
1967
1968 void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num)
1969 {
1970 esw_offloads_unload_rep(esw, vport_num);
1971 esw_disable_vport(esw, vport_num);
1972 }
1973
1974 void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs)
1975 {
1976 int i;
1977
1978 mlx5_esw_for_each_vf_vport_num_reverse(esw, i, num_vfs)
1979 mlx5_eswitch_unload_vport(esw, i);
1980 }
1981
1982 int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
1983 enum mlx5_eswitch_vport_event enabled_events)
1984 {
1985 int err;
1986 int i;
1987
1988 mlx5_esw_for_each_vf_vport_num(esw, i, num_vfs) {
1989 err = mlx5_eswitch_load_vport(esw, i, enabled_events);
1990 if (err)
1991 goto vf_err;
1992 }
1993
1994 return 0;
1995
1996 vf_err:
1997 mlx5_eswitch_unload_vf_vports(esw, i - 1);
1998 return err;
1999 }
2000
2001 /* mlx5_eswitch_enable_pf_vf_vports() enables the PF, ECPF and VF vports,
2002  * whichever are present on the eswitch.
2003  */
2004 int
2005 mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
2006 enum mlx5_eswitch_vport_event enabled_events)
2007 {
2008 int ret;
2009
2010 /* Enable PF vport */
2011 ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_PF, enabled_events);
2012 if (ret)
2013 return ret;
2014
2015 /* Enable ECPF vport */
2016 if (mlx5_ecpf_vport_exists(esw->dev)) {
2017 ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_ECPF, enabled_events);
2018 if (ret)
2019 goto ecpf_err;
2020 }
2021
2022 /* Enable VF vports */
2023 ret = mlx5_eswitch_load_vf_vports(esw, esw->esw_funcs.num_vfs,
2024 enabled_events);
2025 if (ret)
2026 goto vf_err;
2027 return 0;
2028
2029 vf_err:
2030 if (mlx5_ecpf_vport_exists(esw->dev))
2031 mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);
2032
2033 ecpf_err:
2034 mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
2035 return ret;
2036 }
2037
2038 /* mlx5_eswitch_disable_pf_vf_vports() disables the PF, ECPF and VF vports,
2039  * whichever were previously enabled on the eswitch.
2040  */
2041 void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
2042 {
2043 mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
2044
2045 if (mlx5_ecpf_vport_exists(esw->dev))
2046 mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);
2047
2048 mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
2049 }
2050
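/* Read the fdb_large_groups devlink parameter, falling back to
 * ESW_OFFLOADS_DEFAULT_NUM_GROUPS when devlink cannot provide it.
 */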
2051 static void mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw)
2052 {
2053 struct devlink *devlink = priv_to_devlink(esw->dev);
2054 union devlink_param_value val;
2055 int err;
2056
2057 err = devlink_param_driverinit_value_get(devlink,
2058 MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
2059 &val);
2060 if (!err) {
2061 esw->params.large_group_num = val.vu32;
2062 } else {
2063 esw_warn(esw->dev,
2064 "Devlink can't get param fdb_large_groups, uses default (%d).\n",
2065 ESW_OFFLOADS_DEFAULT_NUM_GROUPS);
2066 esw->params.large_group_num = ESW_OFFLOADS_DEFAULT_NUM_GROUPS;
2067 }
2068 }
2069
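/* Cache the number of VFs used when enabling the VF vports. On an ECPF
 * eswitch manager the value is queried from the device (host_num_of_vfs)
 * instead of being taken from the caller.
 */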
2070 static void
2071 mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, int num_vfs)
2072 {
2073 const u32 *out;
2074
2075 WARN_ON_ONCE(esw->mode != MLX5_ESWITCH_NONE);
2076
2077 if (num_vfs < 0)
2078 return;
2079
2080 if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) {
2081 esw->esw_funcs.num_vfs = num_vfs;
2082 return;
2083 }
2084
2085 out = mlx5_esw_query_functions(esw->dev);
2086 if (IS_ERR(out))
2087 return;
2088
2089 esw->esw_funcs.num_vfs = MLX5_GET(query_esw_functions_out, out,
2090 host_params_context.host_num_of_vfs);
2091 kvfree(out);
2092 }
2093
2094 /**
2095 * mlx5_eswitch_enable_locked - Enable eswitch
2096 * @esw: Pointer to eswitch
2097 * @mode: Eswitch mode to enable
2098 * @num_vfs: Enable eswitch for given number of VFs. This is optional.
2099  *		Valid values are 0, > 0 and MLX5_ESWITCH_IGNORE_NUM_VFS.
2100  *		Caller should pass num_vfs > 0 when enabling eswitch for
2101  *		vf vports. Caller should pass num_vfs = 0 when eswitch
2102  *		is enabled without sriov VFs or when the caller
2103  *		is unaware of the sriov state of the host PF on an ECPF based
2104  *		eswitch. Caller should pass < 0 when num_vfs should be
2105  *		completely ignored. This is typically the case when eswitch
2106  *		is enabled without sriov regardless of PF/ECPF system.
2107  * mlx5_eswitch_enable_locked() enables the eswitch in either legacy or
2108  * offloads mode. If num_vfs >= 0 is provided, it sets up the VF-related
2109  * eswitch vports. It returns 0 on success or an error code on failure.
2110 */
2111 int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
2112 {
2113 int err;
2114
2115 lockdep_assert_held(&esw->mode_lock);
2116
2117 if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
2118 esw_warn(esw->dev, "FDB is not supported, aborting ...\n");
2119 return -EOPNOTSUPP;
2120 }
2121
2122 if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
2123 esw_warn(esw->dev, "ingress ACL is not supported by FW\n");
2124
2125 if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
2126 		esw_warn(esw->dev, "egress ACL is not supported by FW\n");
2127
2128 mlx5_eswitch_get_devlink_param(esw);
2129
2130 mlx5_eswitch_update_num_of_vfs(esw, num_vfs);
2131
2132 esw_create_tsar(esw);
2133
2134 esw->mode = mode;
2135
2136 mlx5_lag_update(esw->dev);
2137
2138 if (mode == MLX5_ESWITCH_LEGACY) {
2139 err = esw_legacy_enable(esw);
2140 } else {
2141 mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
2142 mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
2143 err = esw_offloads_enable(esw);
2144 }
2145
2146 if (err)
2147 goto abort;
2148
2149 mlx5_eswitch_event_handlers_register(esw);
2150
2151 esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n",
2152 mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
2153 esw->esw_funcs.num_vfs, esw->enabled_vports);
2154
2155 return 0;
2156
2157 abort:
2158 esw->mode = MLX5_ESWITCH_NONE;
2159
2160 if (mode == MLX5_ESWITCH_OFFLOADS) {
2161 mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
2162 mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
2163 }
2164
2165 return err;
2166 }
2167
2168 /**
2169 * mlx5_eswitch_enable - Enable eswitch
2170 * @esw: Pointer to eswitch
2171  * @num_vfs: Enable eswitch for given number of VFs.
2172  *		Caller must pass num_vfs > 0 when enabling eswitch for
2173  *		vf vports.
2174  * mlx5_eswitch_enable() returns 0 on success or an error code on failure.
2175 */
2176 int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
2177 {
2178 int ret;
2179
2180 if (!ESW_ALLOWED(esw))
2181 return 0;
2182
2183 mutex_lock(&esw->mode_lock);
2184 ret = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, num_vfs);
2185 mutex_unlock(&esw->mode_lock);
2186 return ret;
2187 }
2188
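/* Disable the eswitch while holding mode_lock: unregister the event handlers,
 * tear down the active mode (legacy or offloads), destroy the TSAR and return
 * to MLX5_ESWITCH_NONE. VF vport info is cleared only when clear_vf is set.
 */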
2189 void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
2190 {
2191 int old_mode;
2192
2193 	lockdep_assert_held(&esw->mode_lock);
2194
2195 if (esw->mode == MLX5_ESWITCH_NONE)
2196 return;
2197
2198 esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n",
2199 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
2200 esw->esw_funcs.num_vfs, esw->enabled_vports);
2201
2202 mlx5_eswitch_event_handlers_unregister(esw);
2203
2204 if (esw->mode == MLX5_ESWITCH_LEGACY)
2205 esw_legacy_disable(esw);
2206 else if (esw->mode == MLX5_ESWITCH_OFFLOADS)
2207 esw_offloads_disable(esw);
2208
2209 esw_destroy_tsar(esw);
2210
2211 old_mode = esw->mode;
2212 esw->mode = MLX5_ESWITCH_NONE;
2213
2214 mlx5_lag_update(esw->dev);
2215
2216 if (old_mode == MLX5_ESWITCH_OFFLOADS) {
2217 mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
2218 mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
2219 }
2220 if (clear_vf)
2221 mlx5_eswitch_clear_vf_vports_info(esw);
2222 }
2223
2224 void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf)
2225 {
2226 if (!ESW_ALLOWED(esw))
2227 return;
2228
2229 mutex_lock(&esw->mode_lock);
2230 mlx5_eswitch_disable_locked(esw, clear_vf);
2231 mutex_unlock(&esw->mode_lock);
2232 }
2233
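/* Allocate and initialize the eswitch instance for a vport manager device:
 * work queue, per-vport array, representors and the offloads/state locks.
 * The eswitch starts out in MLX5_ESWITCH_NONE mode.
 */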
2234 int mlx5_eswitch_init(struct mlx5_core_dev *dev)
2235 {
2236 struct mlx5_eswitch *esw;
2237 struct mlx5_vport *vport;
2238 int total_vports;
2239 int err, i;
2240
2241 if (!MLX5_VPORT_MANAGER(dev))
2242 return 0;
2243
2244 total_vports = mlx5_eswitch_get_total_vports(dev);
2245
2246 esw_info(dev,
2247 "Total vports %d, per vport: max uc(%d) max mc(%d)\n",
2248 total_vports,
2249 MLX5_MAX_UC_PER_VPORT(dev),
2250 MLX5_MAX_MC_PER_VPORT(dev));
2251
2252 esw = kzalloc(sizeof(*esw), GFP_KERNEL);
2253 if (!esw)
2254 return -ENOMEM;
2255
2256 esw->dev = dev;
2257 esw->manager_vport = mlx5_eswitch_manager_vport(dev);
2258 esw->first_host_vport = mlx5_eswitch_first_host_vport_num(dev);
2259
2260 esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
2261 if (!esw->work_queue) {
2262 err = -ENOMEM;
2263 goto abort;
2264 }
2265
2266 esw->vports = kcalloc(total_vports, sizeof(struct mlx5_vport),
2267 GFP_KERNEL);
2268 if (!esw->vports) {
2269 err = -ENOMEM;
2270 goto abort;
2271 }
2272
2273 esw->total_vports = total_vports;
2274
2275 err = esw_offloads_init_reps(esw);
2276 if (err)
2277 goto abort;
2278
2279 mutex_init(&esw->offloads.encap_tbl_lock);
2280 hash_init(esw->offloads.encap_tbl);
2281 mutex_init(&esw->offloads.mod_hdr.lock);
2282 hash_init(esw->offloads.mod_hdr.hlist);
2283 atomic64_set(&esw->offloads.num_flows, 0);
2284 mutex_init(&esw->state_lock);
2285 mutex_init(&esw->mode_lock);
2286
2287 mlx5_esw_for_all_vports(esw, i, vport) {
2288 vport->vport = mlx5_eswitch_index_to_vport_num(esw, i);
2289 vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
2290 vport->dev = dev;
2291 INIT_WORK(&vport->vport_change_handler,
2292 esw_vport_change_handler);
2293 }
2294
2295 esw->enabled_vports = 0;
2296 esw->mode = MLX5_ESWITCH_NONE;
2297 esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
2298
2299 dev->priv.eswitch = esw;
2300 return 0;
2301 abort:
2302 if (esw->work_queue)
2303 destroy_workqueue(esw->work_queue);
2304 esw_offloads_cleanup_reps(esw);
2305 kfree(esw->vports);
2306 kfree(esw);
2307 return err;
2308 }
2309
2310 void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
2311 {
2312 if (!esw || !MLX5_VPORT_MANAGER(esw->dev))
2313 return;
2314
2315 esw_info(esw->dev, "cleanup\n");
2316
2317 esw->dev->priv.eswitch = NULL;
2318 destroy_workqueue(esw->work_queue);
2319 esw_offloads_cleanup_reps(esw);
2320 mutex_destroy(&esw->mode_lock);
2321 mutex_destroy(&esw->state_lock);
2322 mutex_destroy(&esw->offloads.mod_hdr.lock);
2323 mutex_destroy(&esw->offloads.encap_tbl_lock);
2324 kfree(esw->vports);
2325 kfree(esw);
2326 }
2327
2328 /* Vport Administration */
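/* Set a vport's MAC and derive a matching node GUID from it; in legacy mode
 * the ingress ACL is reconfigured so spoof checking follows the new MAC.
 */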
2329 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
2330 u16 vport, u8 mac[ETH_ALEN])
2331 {
2332 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
2333 u64 node_guid;
2334 int err = 0;
2335
2336 if (IS_ERR(evport))
2337 return PTR_ERR(evport);
2338 if (is_multicast_ether_addr(mac))
2339 return -EINVAL;
2340
2341 mutex_lock(&esw->state_lock);
2342
2343 if (evport->info.spoofchk && !is_valid_ether_addr(mac))
2344 mlx5_core_warn(esw->dev,
2345 "Set invalid MAC while spoofchk is on, vport(%d)\n",
2346 vport);
2347
2348 err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
2349 if (err) {
2350 mlx5_core_warn(esw->dev,
2351 "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n",
2352 vport, err);
2353 goto unlock;
2354 }
2355
2356 node_guid_gen_from_mac(&node_guid, mac);
2357 err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid);
2358 if (err)
2359 mlx5_core_warn(esw->dev,
2360 "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
2361 vport, err);
2362
2363 ether_addr_copy(evport->info.mac, mac);
2364 evport->info.node_guid = node_guid;
2365 if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
2366 err = esw_vport_ingress_config(esw, evport);
2367
2368 unlock:
2369 mutex_unlock(&esw->state_lock);
2370 return err;
2371 }
2372
2373 int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
2374 u16 vport, int link_state)
2375 {
2376 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
2377 int err = 0;
2378
2379 if (!ESW_ALLOWED(esw))
2380 return -EPERM;
2381 if (IS_ERR(evport))
2382 return PTR_ERR(evport);
2383
2384 mutex_lock(&esw->state_lock);
2385
2386 err = mlx5_modify_vport_admin_state(esw->dev,
2387 MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
2388 vport, 1, link_state);
2389 if (err) {
2390 mlx5_core_warn(esw->dev,
2391 "Failed to set vport %d link state, err = %d",
2392 vport, err);
2393 goto unlock;
2394 }
2395
2396 evport->info.link_state = link_state;
2397
2398 unlock:
2399 mutex_unlock(&esw->state_lock);
2400 return err;
2401 }
2402
2403 int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
2404 u16 vport, struct ifla_vf_info *ivi)
2405 {
2406 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
2407
2408 if (IS_ERR(evport))
2409 return PTR_ERR(evport);
2410
2411 memset(ivi, 0, sizeof(*ivi));
2412 ivi->vf = vport - 1;
2413
2414 mutex_lock(&esw->state_lock);
2415 ether_addr_copy(ivi->mac, evport->info.mac);
2416 ivi->linkstate = evport->info.link_state;
2417 ivi->vlan = evport->info.vlan;
2418 ivi->qos = evport->info.qos;
2419 ivi->spoofchk = evport->info.spoofchk;
2420 ivi->trusted = evport->info.trusted;
2421 ivi->min_tx_rate = evport->info.min_rate;
2422 ivi->max_tx_rate = evport->info.max_rate;
2423 mutex_unlock(&esw->state_lock);
2424
2425 return 0;
2426 }
2427
2428 int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
2429 u16 vport, u16 vlan, u8 qos, u8 set_flags)
2430 {
2431 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
2432 int err = 0;
2433
2434 if (!ESW_ALLOWED(esw))
2435 return -EPERM;
2436 if (IS_ERR(evport))
2437 return PTR_ERR(evport);
2438 if (vlan > 4095 || qos > 7)
2439 return -EINVAL;
2440
2441 err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
2442 if (err)
2443 return err;
2444
2445 evport->info.vlan = vlan;
2446 evport->info.qos = qos;
2447 if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) {
2448 err = esw_vport_ingress_config(esw, evport);
2449 if (err)
2450 return err;
2451 err = esw_vport_egress_config(esw, evport);
2452 }
2453
2454 return err;
2455 }
2456
2457 int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
2458 u16 vport, u16 vlan, u8 qos)
2459 {
2460 u8 set_flags = 0;
2461 int err;
2462
2463 if (vlan || qos)
2464 set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;
2465
2466 mutex_lock(&esw->state_lock);
2467 err = __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);
2468 mutex_unlock(&esw->state_lock);
2469
2470 return err;
2471 }
2472
2473 int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
2474 u16 vport, bool spoofchk)
2475 {
2476 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
2477 bool pschk;
2478 int err = 0;
2479
2480 if (!ESW_ALLOWED(esw))
2481 return -EPERM;
2482 if (IS_ERR(evport))
2483 return PTR_ERR(evport);
2484
2485 mutex_lock(&esw->state_lock);
2486 pschk = evport->info.spoofchk;
2487 evport->info.spoofchk = spoofchk;
2488 if (pschk && !is_valid_ether_addr(evport->info.mac))
2489 mlx5_core_warn(esw->dev,
2490 "Spoofchk in set while MAC is invalid, vport(%d)\n",
2491 evport->vport);
2492 if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
2493 err = esw_vport_ingress_config(esw, evport);
2494 if (err)
2495 evport->info.spoofchk = pschk;
2496 mutex_unlock(&esw->state_lock);
2497
2498 return err;
2499 }
2500
2501 static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw)
2502 {
2503 if (esw->fdb_table.legacy.vepa_uplink_rule)
2504 mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_uplink_rule);
2505
2506 if (esw->fdb_table.legacy.vepa_star_rule)
2507 mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_star_rule);
2508
2509 esw->fdb_table.legacy.vepa_uplink_rule = NULL;
2510 esw->fdb_table.legacy.vepa_star_rule = NULL;
2511 }
2512
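/* Install (or, when !setting, remove) the two legacy FDB rules that implement
 * VEPA: a rule steering uplink traffic into the FDB and a catch-all rule
 * forwarding all other traffic to the uplink vport.
 */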
2513 static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
2514 u8 setting)
2515 {
2516 struct mlx5_flow_destination dest = {};
2517 struct mlx5_flow_act flow_act = {};
2518 struct mlx5_flow_handle *flow_rule;
2519 struct mlx5_flow_spec *spec;
2520 int err = 0;
2521 void *misc;
2522
2523 if (!setting) {
2524 esw_cleanup_vepa_rules(esw);
2525 return 0;
2526 }
2527
2528 if (esw->fdb_table.legacy.vepa_uplink_rule)
2529 return 0;
2530
2531 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
2532 if (!spec)
2533 return -ENOMEM;
2534
2535 	/* Uplink rule forwards uplink traffic to the FDB */
2536 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
2537 MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK);
2538
2539 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
2540 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
2541
2542 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
2543 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
2544 dest.ft = esw->fdb_table.legacy.fdb;
2545 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2546 flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec,
2547 &flow_act, &dest, 1);
2548 if (IS_ERR(flow_rule)) {
2549 err = PTR_ERR(flow_rule);
2550 goto out;
2551 } else {
2552 esw->fdb_table.legacy.vepa_uplink_rule = flow_rule;
2553 }
2554
2555 /* Star rule to forward all traffic to uplink vport */
2556 memset(&dest, 0, sizeof(dest));
2557 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
2558 dest.vport.num = MLX5_VPORT_UPLINK;
2559 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2560 flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, NULL,
2561 &flow_act, &dest, 1);
2562 if (IS_ERR(flow_rule)) {
2563 err = PTR_ERR(flow_rule);
2564 goto out;
2565 } else {
2566 esw->fdb_table.legacy.vepa_star_rule = flow_rule;
2567 }
2568
2569 out:
2570 kvfree(spec);
2571 if (err)
2572 esw_cleanup_vepa_rules(esw);
2573 return err;
2574 }
2575
2576 int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting)
2577 {
2578 int err = 0;
2579
2580 if (!esw)
2581 return -EOPNOTSUPP;
2582
2583 if (!ESW_ALLOWED(esw))
2584 return -EPERM;
2585
2586 mutex_lock(&esw->state_lock);
2587 if (esw->mode != MLX5_ESWITCH_LEGACY) {
2588 err = -EOPNOTSUPP;
2589 goto out;
2590 }
2591
2592 err = _mlx5_eswitch_set_vepa_locked(esw, setting);
2593
2594 out:
2595 mutex_unlock(&esw->state_lock);
2596 return err;
2597 }
2598
2599 int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
2600 {
2601 if (!esw)
2602 return -EOPNOTSUPP;
2603
2604 if (!ESW_ALLOWED(esw))
2605 return -EPERM;
2606
2607 if (esw->mode != MLX5_ESWITCH_LEGACY)
2608 return -EOPNOTSUPP;
2609
2610 *setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0;
2611 return 0;
2612 }
2613
2614 int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
2615 u16 vport, bool setting)
2616 {
2617 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
2618
2619 if (!ESW_ALLOWED(esw))
2620 return -EPERM;
2621 if (IS_ERR(evport))
2622 return PTR_ERR(evport);
2623
2624 mutex_lock(&esw->state_lock);
2625 evport->info.trusted = setting;
2626 if (evport->enabled)
2627 esw_vport_change_handle_locked(evport);
2628 mutex_unlock(&esw->state_lock);
2629
2630 return 0;
2631 }
2632
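/* Divider used to convert per-vport min_rate values into bw_share units:
 * the largest min_rate among enabled vports divided by the firmware
 * max_tsar_bw_share, clamped to at least 1.
 */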
2633 static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
2634 {
2635 u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
2636 struct mlx5_vport *evport;
2637 u32 max_guarantee = 0;
2638 int i;
2639
2640 mlx5_esw_for_all_vports(esw, i, evport) {
2641 if (!evport->enabled || evport->info.min_rate < max_guarantee)
2642 continue;
2643 max_guarantee = evport->info.min_rate;
2644 }
2645
2646 return max_t(u32, max_guarantee / fw_max_bw_share, 1);
2647 }
2648
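/* Recompute bw_share for every enabled vport from its min_rate and the given
 * divider, pushing only values that actually changed to the firmware.
 */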
2649 static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
2650 {
2651 u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
2652 struct mlx5_vport *evport;
2653 u32 vport_max_rate;
2654 u32 vport_min_rate;
2655 u32 bw_share;
2656 int err;
2657 int i;
2658
2659 mlx5_esw_for_all_vports(esw, i, evport) {
2660 if (!evport->enabled)
2661 continue;
2662 vport_min_rate = evport->info.min_rate;
2663 vport_max_rate = evport->info.max_rate;
2664 bw_share = MLX5_MIN_BW_SHARE;
2665
2666 if (vport_min_rate)
2667 bw_share = MLX5_RATE_TO_BW_SHARE(vport_min_rate,
2668 divider,
2669 fw_max_bw_share);
2670
2671 if (bw_share == evport->qos.bw_share)
2672 continue;
2673
2674 err = esw_vport_qos_config(esw, evport, vport_max_rate,
2675 bw_share);
2676 if (!err)
2677 evport->qos.bw_share = bw_share;
2678 else
2679 return err;
2680 }
2681
2682 return 0;
2683 }
2684
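/* Set the min/max TX rate of a vport. A min_rate change triggers a
 * renormalization of bw_share across all enabled vports; a max_rate change
 * only reconfigures this vport.
 */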
2685 int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
2686 u32 max_rate, u32 min_rate)
2687 {
2688 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
2689 u32 fw_max_bw_share;
2690 u32 previous_min_rate;
2691 u32 divider;
2692 bool min_rate_supported;
2693 bool max_rate_supported;
2694 int err = 0;
2695
2696 if (!ESW_ALLOWED(esw))
2697 return -EPERM;
2698 if (IS_ERR(evport))
2699 return PTR_ERR(evport);
2700
2701 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
2702 min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
2703 fw_max_bw_share >= MLX5_MIN_BW_SHARE;
2704 max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit);
2705
2706 if ((min_rate && !min_rate_supported) || (max_rate && !max_rate_supported))
2707 return -EOPNOTSUPP;
2708
2709 mutex_lock(&esw->state_lock);
2710
2711 if (min_rate == evport->info.min_rate)
2712 goto set_max_rate;
2713
2714 previous_min_rate = evport->info.min_rate;
2715 evport->info.min_rate = min_rate;
2716 divider = calculate_vports_min_rate_divider(esw);
2717 err = normalize_vports_min_rate(esw, divider);
2718 if (err) {
2719 evport->info.min_rate = previous_min_rate;
2720 goto unlock;
2721 }
2722
2723 set_max_rate:
2724 if (max_rate == evport->info.max_rate)
2725 goto unlock;
2726
2727 err = esw_vport_qos_config(esw, evport, max_rate, evport->qos.bw_share);
2728 if (!err)
2729 evport->info.max_rate = max_rate;
2730
2731 unlock:
2732 mutex_unlock(&esw->state_lock);
2733 return err;
2734 }
2735
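/* Collect drop statistics for a legacy-mode vport from its ACL drop counters
 * and, when the device supports it, from the vport-down discard counters.
 */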
2736 static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
2737 struct mlx5_vport *vport,
2738 struct mlx5_vport_drop_stats *stats)
2739 {
2740 struct mlx5_eswitch *esw = dev->priv.eswitch;
2741 u64 rx_discard_vport_down, tx_discard_vport_down;
2742 u64 bytes = 0;
2743 int err = 0;
2744
2745 if (esw->mode != MLX5_ESWITCH_LEGACY)
2746 return 0;
2747
2748 mutex_lock(&esw->state_lock);
2749 if (!vport->enabled)
2750 goto unlock;
2751
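	/* Drop counters are reported from the VF's point of view: drops on the
	 * eswitch vport egress path count as VF rx_dropped and drops on the
	 * ingress path as VF tx_dropped.
	 */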
2752 if (vport->egress.legacy.drop_counter)
2753 mlx5_fc_query(dev, vport->egress.legacy.drop_counter,
2754 &stats->rx_dropped, &bytes);
2755
2756 if (vport->ingress.legacy.drop_counter)
2757 mlx5_fc_query(dev, vport->ingress.legacy.drop_counter,
2758 &stats->tx_dropped, &bytes);
2759
2760 if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) &&
2761 !MLX5_CAP_GEN(dev, transmit_discard_vport_down))
2762 goto unlock;
2763
2764 err = mlx5_query_vport_down_stats(dev, vport->vport, 1,
2765 &rx_discard_vport_down,
2766 &tx_discard_vport_down);
2767 if (err)
2768 goto unlock;
2769
2770 if (MLX5_CAP_GEN(dev, receive_discard_vport_down))
2771 stats->rx_dropped += rx_discard_vport_down;
2772 if (MLX5_CAP_GEN(dev, transmit_discard_vport_down))
2773 stats->tx_dropped += tx_discard_vport_down;
2774
2775 unlock:
2776 mutex_unlock(&esw->state_lock);
2777 return err;
2778 }
2779
2780 int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
2781 u16 vport_num,
2782 struct ifla_vf_stats *vf_stats)
2783 {
2784 struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
2785 int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
2786 u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
2787 struct mlx5_vport_drop_stats stats = {0};
2788 int err = 0;
2789 u32 *out;
2790
2791 if (IS_ERR(vport))
2792 return PTR_ERR(vport);
2793
2794 out = kvzalloc(outlen, GFP_KERNEL);
2795 if (!out)
2796 return -ENOMEM;
2797
2798 MLX5_SET(query_vport_counter_in, in, opcode,
2799 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
2800 MLX5_SET(query_vport_counter_in, in, op_mod, 0);
2801 MLX5_SET(query_vport_counter_in, in, vport_number, vport->vport);
2802 MLX5_SET(query_vport_counter_in, in, other_vport, 1);
2803
2804 err = mlx5_cmd_exec(esw->dev, in, sizeof(in), out, outlen);
2805 if (err)
2806 goto free_out;
2807
2808 #define MLX5_GET_CTR(p, x) \
2809 MLX5_GET64(query_vport_counter_out, p, x)
2810
2811 memset(vf_stats, 0, sizeof(*vf_stats));
2812 vf_stats->rx_packets =
2813 MLX5_GET_CTR(out, received_eth_unicast.packets) +
2814 MLX5_GET_CTR(out, received_ib_unicast.packets) +
2815 MLX5_GET_CTR(out, received_eth_multicast.packets) +
2816 MLX5_GET_CTR(out, received_ib_multicast.packets) +
2817 MLX5_GET_CTR(out, received_eth_broadcast.packets);
2818
2819 vf_stats->rx_bytes =
2820 MLX5_GET_CTR(out, received_eth_unicast.octets) +
2821 MLX5_GET_CTR(out, received_ib_unicast.octets) +
2822 MLX5_GET_CTR(out, received_eth_multicast.octets) +
2823 MLX5_GET_CTR(out, received_ib_multicast.octets) +
2824 MLX5_GET_CTR(out, received_eth_broadcast.octets);
2825
2826 vf_stats->tx_packets =
2827 MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
2828 MLX5_GET_CTR(out, transmitted_ib_unicast.packets) +
2829 MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
2830 MLX5_GET_CTR(out, transmitted_ib_multicast.packets) +
2831 MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
2832
2833 vf_stats->tx_bytes =
2834 MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
2835 MLX5_GET_CTR(out, transmitted_ib_unicast.octets) +
2836 MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
2837 MLX5_GET_CTR(out, transmitted_ib_multicast.octets) +
2838 MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
2839
2840 vf_stats->multicast =
2841 MLX5_GET_CTR(out, received_eth_multicast.packets) +
2842 MLX5_GET_CTR(out, received_ib_multicast.packets);
2843
2844 vf_stats->broadcast =
2845 MLX5_GET_CTR(out, received_eth_broadcast.packets);
2846
2847 err = mlx5_eswitch_query_vport_drop_stats(esw->dev, vport, &stats);
2848 if (err)
2849 goto free_out;
2850 vf_stats->rx_dropped = stats.rx_dropped;
2851 vf_stats->tx_dropped = stats.tx_dropped;
2852
2853 free_out:
2854 kvfree(out);
2855 return err;
2856 }
2857
2858 u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw)
2859 {
2860 return ESW_ALLOWED(esw) ? esw->mode : MLX5_ESWITCH_NONE;
2861 }
2862 EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
2863
2864 enum devlink_eswitch_encap_mode
2865 mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev)
2866 {
2867 struct mlx5_eswitch *esw;
2868
2869 esw = dev->priv.eswitch;
2870 return ESW_ALLOWED(esw) ? esw->offloads.encap :
2871 DEVLINK_ESWITCH_ENCAP_MODE_NONE;
2872 }
2873 EXPORT_SYMBOL(mlx5_eswitch_get_encap_mode);
2874
2875 bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1)
2876 {
2877 if ((dev0->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
2878 dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE) ||
2879 (dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS &&
2880 dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS))
2881 return true;
2882
2883 return false;
2884 }
2885
2886 bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
2887 struct mlx5_core_dev *dev1)
2888 {
2889 return (dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS &&
2890 dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS);
2891 }
2892
2893