1 /*
2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <linux/etherdevice.h>
34 #include <linux/mlx5/driver.h>
35 #include <linux/mlx5/mlx5_ifc.h>
36 #include <linux/mlx5/vport.h>
37 #include <linux/mlx5/fs.h>
38 #include "mlx5_core.h"
39 #include "lib/eq.h"
40 #include "eswitch.h"
41 #include "fs_core.h"
42 #include "ecpf.h"
43
44 enum {
45 MLX5_ACTION_NONE = 0,
46 MLX5_ACTION_ADD = 1,
47 MLX5_ACTION_DEL = 2,
48 };
49
50 /* Vport UC/MC hash node */
51 struct vport_addr {
52 struct l2addr_node node;
53 u8 action;
54 u16 vport;
55 struct mlx5_flow_handle *flow_rule;
56         bool                   mpfs; /* UC MAC was added to MPFS */
57 /* A flag indicating that mac was added due to mc promiscuous vport */
58 bool mc_promisc;
59 };
60
61 enum {
62 UC_ADDR_CHANGE = BIT(0),
63 MC_ADDR_CHANGE = BIT(1),
64 PROMISC_CHANGE = BIT(3),
65 };
66
67 static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw);
68 static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw);
69
70 /* Vport context events */
71 #define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \
72 MC_ADDR_CHANGE | \
73 PROMISC_CHANGE)
74
75 /* The vport getters/iterators are only valid after esw->total_vports
76 * and vport->vport are initialized in mlx5_eswitch_init.
77 */
78 #define mlx5_esw_for_all_vports(esw, i, vport) \
79 for ((i) = MLX5_VPORT_PF; \
80 (vport) = &(esw)->vports[i], \
81 (i) < (esw)->total_vports; (i)++)
82
83 #define mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs) \
84 for ((i) = MLX5_VPORT_FIRST_VF; \
85 (vport) = &(esw)->vports[i], \
86 (i) <= (nvfs); (i)++)
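/* Illustration only: assuming MLX5_VPORT_PF == 0 and MLX5_VPORT_FIRST_VF == 1,
 * enabling two VFs makes mlx5_esw_for_each_vf_vport() visit esw->vports[1]
 * and esw->vports[2], while mlx5_esw_for_all_vports() also walks the PF
 * entry at esw->vports[0] and any remaining special vports up to
 * esw->total_vports - 1.
 */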
87
88 static struct mlx5_vport *mlx5_eswitch_get_vport(struct mlx5_eswitch *esw,
89 u16 vport_num)
90 {
91 u16 idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);
92
93 WARN_ON(idx > esw->total_vports - 1);
94 return &esw->vports[idx];
95 }
96
97 static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
98 u32 events_mask)
99 {
100 int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
101 int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
102 void *nic_vport_ctx;
103
104 MLX5_SET(modify_nic_vport_context_in, in,
105 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
106 MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
107 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
108 if (vport)
109 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
110 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
111 in, nic_vport_context);
112
113 MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);
114
115 if (events_mask & UC_ADDR_CHANGE)
116 MLX5_SET(nic_vport_context, nic_vport_ctx,
117 event_on_uc_address_change, 1);
118 if (events_mask & MC_ADDR_CHANGE)
119 MLX5_SET(nic_vport_context, nic_vport_ctx,
120 event_on_mc_address_change, 1);
121 if (events_mask & PROMISC_CHANGE)
122 MLX5_SET(nic_vport_context, nic_vport_ctx,
123 event_on_promisc_change, 1);
124
125 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
126 }
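/* Usage sketch (illustrative, mirrors how this file calls it): re-arming all
 * three vport context event types would be
 *
 *	err = arm_vport_context_events_cmd(dev, vport_num,
 *					   UC_ADDR_CHANGE | MC_ADDR_CHANGE |
 *					   PROMISC_CHANGE);
 *
 * while events_mask == 0, as used by esw_disable_vport(), disarms them.
 */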
127
128 /* E-Switch vport context HW commands */
129 static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
130 void *in, int inlen)
131 {
132 u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};
133
134 MLX5_SET(modify_esw_vport_context_in, in, opcode,
135 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
136 MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
137 if (vport)
138 MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
139 return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
140 }
141
142 static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
143 u16 vlan, u8 qos, u8 set_flags)
144 {
145 u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0};
146
147 if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
148 !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
149 return -EOPNOTSUPP;
150
151 esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
152 vport, vlan, qos, set_flags);
153
154 if (set_flags & SET_VLAN_STRIP)
155 MLX5_SET(modify_esw_vport_context_in, in,
156 esw_vport_context.vport_cvlan_strip, 1);
157
158 if (set_flags & SET_VLAN_INSERT) {
159 /* insert only if no vlan in packet */
160 MLX5_SET(modify_esw_vport_context_in, in,
161 esw_vport_context.vport_cvlan_insert, 1);
162
163 MLX5_SET(modify_esw_vport_context_in, in,
164 esw_vport_context.cvlan_pcp, qos);
165 MLX5_SET(modify_esw_vport_context_in, in,
166 esw_vport_context.cvlan_id, vlan);
167 }
168
169 MLX5_SET(modify_esw_vport_context_in, in,
170 field_select.vport_cvlan_strip, 1);
171 MLX5_SET(modify_esw_vport_context_in, in,
172 field_select.vport_cvlan_insert, 1);
173
174 return modify_esw_vport_context_cmd(dev, vport, in, sizeof(in));
175 }
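/* Usage sketch (illustrative only, vf_vport_num is a hypothetical variable):
 * configuring VST with VLAN 100 and PCP 3 on a VF, enabling both C-VLAN
 * strip and insert, would be
 *
 *	err = modify_esw_vport_cvlan(dev, vf_vport_num, 100, 3,
 *				     SET_VLAN_STRIP | SET_VLAN_INSERT);
 *
 * Passing set_flags == 0 clears both strip and insert for the vport.
 */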
176
177 /* E-Switch FDB */
178 static struct mlx5_flow_handle *
179 __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u16 vport, bool rx_rule,
180 u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
181 {
182 int match_header = (is_zero_ether_addr(mac_c) ? 0 :
183 MLX5_MATCH_OUTER_HEADERS);
184 struct mlx5_flow_handle *flow_rule = NULL;
185 struct mlx5_flow_act flow_act = {0};
186 struct mlx5_flow_destination dest = {};
187 struct mlx5_flow_spec *spec;
188 void *mv_misc = NULL;
189 void *mc_misc = NULL;
190 u8 *dmac_v = NULL;
191 u8 *dmac_c = NULL;
192
193 if (rx_rule)
194 match_header |= MLX5_MATCH_MISC_PARAMETERS;
195
196 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
197 if (!spec)
198 return NULL;
199
200 dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
201 outer_headers.dmac_47_16);
202 dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
203 outer_headers.dmac_47_16);
204
205 if (match_header & MLX5_MATCH_OUTER_HEADERS) {
206 ether_addr_copy(dmac_v, mac_v);
207 ether_addr_copy(dmac_c, mac_c);
208 }
209
210 if (match_header & MLX5_MATCH_MISC_PARAMETERS) {
211 mv_misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
212 misc_parameters);
213 mc_misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
214 misc_parameters);
215 MLX5_SET(fte_match_set_misc, mv_misc, source_port, MLX5_VPORT_UPLINK);
216 MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
217 }
218
219 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
220 dest.vport.num = vport;
221
222 esw_debug(esw->dev,
223 "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
224 dmac_v, dmac_c, vport);
225 spec->match_criteria_enable = match_header;
226 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
227 flow_rule =
228 mlx5_add_flow_rules(esw->fdb_table.legacy.fdb, spec,
229 &flow_act, &dest, 1);
230 if (IS_ERR(flow_rule)) {
231 esw_warn(esw->dev,
232 "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
233 dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
234 flow_rule = NULL;
235 }
236
237 kvfree(spec);
238 return flow_rule;
239 }
240
241 static struct mlx5_flow_handle *
242 esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u16 vport)
243 {
244 u8 mac_c[ETH_ALEN];
245
246 eth_broadcast_addr(mac_c);
247 return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac);
248 }
249
250 static struct mlx5_flow_handle *
251 esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u16 vport)
252 {
253 u8 mac_c[ETH_ALEN];
254 u8 mac_v[ETH_ALEN];
255
256 eth_zero_addr(mac_c);
257 eth_zero_addr(mac_v);
258 mac_c[0] = 0x01;
259 mac_v[0] = 0x01;
260 return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v);
261 }
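/* Note: a mac_c/mac_v pair of 01:00:00:00:00:00 matches only the
 * multicast/group bit of the destination MAC, so this single FDB rule
 * forwards all multicast frames to an allmulti vport.
 */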
262
263 static struct mlx5_flow_handle *
264 esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u16 vport)
265 {
266 u8 mac_c[ETH_ALEN];
267 u8 mac_v[ETH_ALEN];
268
269 eth_zero_addr(mac_c);
270 eth_zero_addr(mac_v);
271 return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
272 }
273
274 enum {
275 LEGACY_VEPA_PRIO = 0,
276 LEGACY_FDB_PRIO,
277 };
278
279 static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw)
280 {
281 struct mlx5_core_dev *dev = esw->dev;
282 struct mlx5_flow_namespace *root_ns;
283 struct mlx5_flow_table *fdb;
284 int err;
285
286 root_ns = mlx5_get_fdb_sub_ns(dev, 0);
287 if (!root_ns) {
288 esw_warn(dev, "Failed to get FDB flow namespace\n");
289 return -EOPNOTSUPP;
290 }
291
292 /* num FTE 2, num FG 2 */
293 fdb = mlx5_create_auto_grouped_flow_table(root_ns, LEGACY_VEPA_PRIO,
294 2, 2, 0, 0);
295 if (IS_ERR(fdb)) {
296 err = PTR_ERR(fdb);
297 esw_warn(dev, "Failed to create VEPA FDB err %d\n", err);
298 return err;
299 }
300 esw->fdb_table.legacy.vepa_fdb = fdb;
301
302 return 0;
303 }
304
305 static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
306 {
307 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
308 struct mlx5_flow_table_attr ft_attr = {};
309 struct mlx5_core_dev *dev = esw->dev;
310 struct mlx5_flow_namespace *root_ns;
311 struct mlx5_flow_table *fdb;
312 struct mlx5_flow_group *g;
313 void *match_criteria;
314 int table_size;
315 u32 *flow_group_in;
316 u8 *dmac;
317 int err = 0;
318
319 esw_debug(dev, "Create FDB log_max_size(%d)\n",
320 MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
321
322 root_ns = mlx5_get_fdb_sub_ns(dev, 0);
323 if (!root_ns) {
324 esw_warn(dev, "Failed to get FDB flow namespace\n");
325 return -EOPNOTSUPP;
326 }
327
328 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
329 if (!flow_group_in)
330 return -ENOMEM;
331
332 table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
333 ft_attr.max_fte = table_size;
334 ft_attr.prio = LEGACY_FDB_PRIO;
335 fdb = mlx5_create_flow_table(root_ns, &ft_attr);
336 if (IS_ERR(fdb)) {
337 err = PTR_ERR(fdb);
338 esw_warn(dev, "Failed to create FDB Table err %d\n", err);
339 goto out;
340 }
341 esw->fdb_table.legacy.fdb = fdb;
342
343 /* Addresses group : Full match unicast/multicast addresses */
344 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
345 MLX5_MATCH_OUTER_HEADERS);
346 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
347 dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
348 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
349         /* Preserve 2 entries for allmulti and promisc rules */
350 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
351 eth_broadcast_addr(dmac);
352 g = mlx5_create_flow_group(fdb, flow_group_in);
353 if (IS_ERR(g)) {
354 err = PTR_ERR(g);
355 esw_warn(dev, "Failed to create flow group err(%d)\n", err);
356 goto out;
357 }
358 esw->fdb_table.legacy.addr_grp = g;
359
360 /* Allmulti group : One rule that forwards any mcast traffic */
361 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
362 MLX5_MATCH_OUTER_HEADERS);
363 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2);
364 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2);
365 eth_zero_addr(dmac);
366 dmac[0] = 0x01;
367 g = mlx5_create_flow_group(fdb, flow_group_in);
368 if (IS_ERR(g)) {
369 err = PTR_ERR(g);
370 esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
371 goto out;
372 }
373 esw->fdb_table.legacy.allmulti_grp = g;
374
375 /* Promiscuous group :
376          * One rule that forwards all unmatched traffic from previous groups
377 */
378 eth_zero_addr(dmac);
379 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
380 MLX5_MATCH_MISC_PARAMETERS);
381 MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
382 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
383 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
384 g = mlx5_create_flow_group(fdb, flow_group_in);
385 if (IS_ERR(g)) {
386 err = PTR_ERR(g);
387 esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
388 goto out;
389 }
390 esw->fdb_table.legacy.promisc_grp = g;
391
392 out:
393 if (err)
394 esw_destroy_legacy_fdb_table(esw);
395
396 kvfree(flow_group_in);
397 return err;
398 }
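/* Resulting legacy FDB layout (for reference): flow indexes
 * [0..table_size - 3] hold the exact-match UC/MC address group,
 * index table_size - 2 holds the single allmulti rule and index
 * table_size - 1 holds the catch-all promiscuous rule that matches
 * only on the misc source_port.
 */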
399
400 static void esw_destroy_legacy_vepa_table(struct mlx5_eswitch *esw)
401 {
402 esw_debug(esw->dev, "Destroy VEPA Table\n");
403 if (!esw->fdb_table.legacy.vepa_fdb)
404 return;
405
406 mlx5_destroy_flow_table(esw->fdb_table.legacy.vepa_fdb);
407 esw->fdb_table.legacy.vepa_fdb = NULL;
408 }
409
410 static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
411 {
412 esw_debug(esw->dev, "Destroy FDB Table\n");
413 if (!esw->fdb_table.legacy.fdb)
414 return;
415
416 if (esw->fdb_table.legacy.promisc_grp)
417 mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
418 if (esw->fdb_table.legacy.allmulti_grp)
419 mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
420 if (esw->fdb_table.legacy.addr_grp)
421 mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
422 mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb);
423
424 esw->fdb_table.legacy.fdb = NULL;
425 esw->fdb_table.legacy.addr_grp = NULL;
426 esw->fdb_table.legacy.allmulti_grp = NULL;
427 esw->fdb_table.legacy.promisc_grp = NULL;
428 }
429
430 static int esw_create_legacy_table(struct mlx5_eswitch *esw)
431 {
432 int err;
433
434 err = esw_create_legacy_vepa_table(esw);
435 if (err)
436 return err;
437
438 err = esw_create_legacy_fdb_table(esw);
439 if (err)
440 esw_destroy_legacy_vepa_table(esw);
441
442 return err;
443 }
444
445 static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
446 {
447 esw_cleanup_vepa_rules(esw);
448 esw_destroy_legacy_fdb_table(esw);
449 esw_destroy_legacy_vepa_table(esw);
450 }
451
452 /* E-Switch vport UC/MC lists management */
453 typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
454 struct vport_addr *vaddr);
455
456 static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
457 {
458 u8 *mac = vaddr->node.addr;
459 u16 vport = vaddr->vport;
460 int err;
461
462         /* Skip mlx5_mpfs_add_mac for the eswitch manager vport,
463          * it is already done by its netdev in mlx5e_execute_l2_action
464 */
465 if (esw->manager_vport == vport)
466 goto fdb_add;
467
468 err = mlx5_mpfs_add_mac(esw->dev, mac);
469 if (err) {
470 esw_warn(esw->dev,
471 "Failed to add L2 table mac(%pM) for vport(0x%x), err(%d)\n",
472 mac, vport, err);
473 return err;
474 }
475 vaddr->mpfs = true;
476
477 fdb_add:
478 /* SRIOV is enabled: Forward UC MAC to vport */
479 if (esw->fdb_table.legacy.fdb && esw->mode == SRIOV_LEGACY)
480 vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
481
482 esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n",
483 vport, mac, vaddr->flow_rule);
484
485 return 0;
486 }
487
488 static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
489 {
490 u8 *mac = vaddr->node.addr;
491 u16 vport = vaddr->vport;
492 int err = 0;
493
494         /* Skip mlx5_mpfs_del_mac for the eswitch manager vport,
495 * it is already done by its netdev in mlx5e_execute_l2_action
496 */
497 if (!vaddr->mpfs || esw->manager_vport == vport)
498 goto fdb_del;
499
500 err = mlx5_mpfs_del_mac(esw->dev, mac);
501 if (err)
502 esw_warn(esw->dev,
503 "Failed to del L2 table mac(%pM) for vport(%d), err(%d)\n",
504 mac, vport, err);
505 vaddr->mpfs = false;
506
507 fdb_del:
508 if (vaddr->flow_rule)
509 mlx5_del_flow_rules(vaddr->flow_rule);
510 vaddr->flow_rule = NULL;
511
512 return 0;
513 }
514
515 static void update_allmulti_vports(struct mlx5_eswitch *esw,
516 struct vport_addr *vaddr,
517 struct esw_mc_addr *esw_mc)
518 {
519 u8 *mac = vaddr->node.addr;
520 struct mlx5_vport *vport;
521 u16 i, vport_num;
522
523 mlx5_esw_for_all_vports(esw, i, vport) {
524 struct hlist_head *vport_hash = vport->mc_list;
525 struct vport_addr *iter_vaddr =
526 l2addr_hash_find(vport_hash,
527 mac,
528 struct vport_addr);
529 vport_num = vport->vport;
530 if (IS_ERR_OR_NULL(vport->allmulti_rule) ||
531 vaddr->vport == vport_num)
532 continue;
533 switch (vaddr->action) {
534 case MLX5_ACTION_ADD:
535 if (iter_vaddr)
536 continue;
537 iter_vaddr = l2addr_hash_add(vport_hash, mac,
538 struct vport_addr,
539 GFP_KERNEL);
540 if (!iter_vaddr) {
541 esw_warn(esw->dev,
542 "ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n",
543 mac, vport_num);
544 continue;
545 }
546 iter_vaddr->vport = vport_num;
547 iter_vaddr->flow_rule =
548 esw_fdb_set_vport_rule(esw,
549 mac,
550 vport_num);
551 iter_vaddr->mc_promisc = true;
552 break;
553 case MLX5_ACTION_DEL:
554 if (!iter_vaddr)
555 continue;
556 mlx5_del_flow_rules(iter_vaddr->flow_rule);
557 l2addr_hash_del(iter_vaddr);
558 break;
559 }
560 }
561 }
562
563 static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
564 {
565 struct hlist_head *hash = esw->mc_table;
566 struct esw_mc_addr *esw_mc;
567 u8 *mac = vaddr->node.addr;
568 u16 vport = vaddr->vport;
569
570 if (!esw->fdb_table.legacy.fdb)
571 return 0;
572
573 esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
574 if (esw_mc)
575 goto add;
576
577 esw_mc = l2addr_hash_add(hash, mac, struct esw_mc_addr, GFP_KERNEL);
578 if (!esw_mc)
579 return -ENOMEM;
580
581 esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
582 esw_fdb_set_vport_rule(esw, mac, MLX5_VPORT_UPLINK);
583
584 /* Add this multicast mac to all the mc promiscuous vports */
585 update_allmulti_vports(esw, vaddr, esw_mc);
586
587 add:
588         /* If the multicast mac was added as a result of an mc promiscuous vport,
589          * don't increment the multicast ref count
590 */
591 if (!vaddr->mc_promisc)
592 esw_mc->refcnt++;
593
594 /* Forward MC MAC to vport */
595 vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
596 esw_debug(esw->dev,
597 "\tADDED MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
598 vport, mac, vaddr->flow_rule,
599 esw_mc->refcnt, esw_mc->uplink_rule);
600 return 0;
601 }
602
603 static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
604 {
605 struct hlist_head *hash = esw->mc_table;
606 struct esw_mc_addr *esw_mc;
607 u8 *mac = vaddr->node.addr;
608 u16 vport = vaddr->vport;
609
610 if (!esw->fdb_table.legacy.fdb)
611 return 0;
612
613 esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
614 if (!esw_mc) {
615 esw_warn(esw->dev,
616 "Failed to find eswitch MC addr for MAC(%pM) vport(%d)",
617 mac, vport);
618 return -EINVAL;
619 }
620 esw_debug(esw->dev,
621 "\tDELETE MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
622 vport, mac, vaddr->flow_rule, esw_mc->refcnt,
623 esw_mc->uplink_rule);
624
625 if (vaddr->flow_rule)
626 mlx5_del_flow_rules(vaddr->flow_rule);
627 vaddr->flow_rule = NULL;
628
629         /* If the multicast mac was added as a result of an mc promiscuous vport,
630 * don't decrement the multicast ref count.
631 */
632 if (vaddr->mc_promisc || (--esw_mc->refcnt > 0))
633 return 0;
634
635 /* Remove this multicast mac from all the mc promiscuous vports */
636 update_allmulti_vports(esw, vaddr, esw_mc);
637
638 if (esw_mc->uplink_rule)
639 mlx5_del_flow_rules(esw_mc->uplink_rule);
640
641 l2addr_hash_del(esw_mc);
642 return 0;
643 }
644
645 /* Apply vport UC/MC list to HW l2 table and FDB table */
646 static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
647 u16 vport_num, int list_type)
648 {
649 struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
650 bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
651 vport_addr_action vport_addr_add;
652 vport_addr_action vport_addr_del;
653 struct vport_addr *addr;
654 struct l2addr_node *node;
655 struct hlist_head *hash;
656 struct hlist_node *tmp;
657 int hi;
658
659 vport_addr_add = is_uc ? esw_add_uc_addr :
660 esw_add_mc_addr;
661 vport_addr_del = is_uc ? esw_del_uc_addr :
662 esw_del_mc_addr;
663
664 hash = is_uc ? vport->uc_list : vport->mc_list;
665 for_each_l2hash_node(node, tmp, hash, hi) {
666 addr = container_of(node, struct vport_addr, node);
667 switch (addr->action) {
668 case MLX5_ACTION_ADD:
669 vport_addr_add(esw, addr);
670 addr->action = MLX5_ACTION_NONE;
671 break;
672 case MLX5_ACTION_DEL:
673 vport_addr_del(esw, addr);
674 l2addr_hash_del(addr);
675 break;
676 }
677 }
678 }
679
680 /* Sync vport UC/MC list from vport context */
681 static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
682 u16 vport_num, int list_type)
683 {
684 struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
685 bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
686 u8 (*mac_list)[ETH_ALEN];
687 struct l2addr_node *node;
688 struct vport_addr *addr;
689 struct hlist_head *hash;
690 struct hlist_node *tmp;
691 int size;
692 int err;
693 int hi;
694 int i;
695
696 size = is_uc ? MLX5_MAX_UC_PER_VPORT(esw->dev) :
697 MLX5_MAX_MC_PER_VPORT(esw->dev);
698
699 mac_list = kcalloc(size, ETH_ALEN, GFP_KERNEL);
700 if (!mac_list)
701 return;
702
703 hash = is_uc ? vport->uc_list : vport->mc_list;
704
705 for_each_l2hash_node(node, tmp, hash, hi) {
706 addr = container_of(node, struct vport_addr, node);
707 addr->action = MLX5_ACTION_DEL;
708 }
709
710 if (!vport->enabled)
711 goto out;
712
713 err = mlx5_query_nic_vport_mac_list(esw->dev, vport_num, list_type,
714 mac_list, &size);
715 if (err)
716 goto out;
717 esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
718 vport_num, is_uc ? "UC" : "MC", size);
719
720 for (i = 0; i < size; i++) {
721 if (is_uc && !is_valid_ether_addr(mac_list[i]))
722 continue;
723
724 if (!is_uc && !is_multicast_ether_addr(mac_list[i]))
725 continue;
726
727 addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr);
728 if (addr) {
729 addr->action = MLX5_ACTION_NONE;
730 /* If this mac was previously added because of allmulti
731                          * promiscuous rx mode, it is now converted to be the original
732 * vport mac.
733 */
734 if (addr->mc_promisc) {
735 struct esw_mc_addr *esw_mc =
736 l2addr_hash_find(esw->mc_table,
737 mac_list[i],
738 struct esw_mc_addr);
739 if (!esw_mc) {
740 esw_warn(esw->dev,
741 "Failed to MAC(%pM) in mcast DB\n",
742 mac_list[i]);
743 continue;
744 }
745 esw_mc->refcnt++;
746 addr->mc_promisc = false;
747 }
748 continue;
749 }
750
751 addr = l2addr_hash_add(hash, mac_list[i], struct vport_addr,
752 GFP_KERNEL);
753 if (!addr) {
754 esw_warn(esw->dev,
755 "Failed to add MAC(%pM) to vport[%d] DB\n",
756 mac_list[i], vport_num);
757 continue;
758 }
759 addr->vport = vport_num;
760 addr->action = MLX5_ACTION_ADD;
761 }
762 out:
763 kfree(mac_list);
764 }
765
766 /* Sync this vport's MC list with the eswitch multicast table (for
767  * allmulti vports). Must be called after esw_update_vport_addr_list.
768 */
769 static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u16 vport_num)
770 {
771 struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
772 struct l2addr_node *node;
773 struct vport_addr *addr;
774 struct hlist_head *hash;
775 struct hlist_node *tmp;
776 int hi;
777
778 hash = vport->mc_list;
779
780 for_each_l2hash_node(node, tmp, esw->mc_table, hi) {
781 u8 *mac = node->addr;
782
783 addr = l2addr_hash_find(hash, mac, struct vport_addr);
784 if (addr) {
785 if (addr->action == MLX5_ACTION_DEL)
786 addr->action = MLX5_ACTION_NONE;
787 continue;
788 }
789 addr = l2addr_hash_add(hash, mac, struct vport_addr,
790 GFP_KERNEL);
791 if (!addr) {
792 esw_warn(esw->dev,
793 "Failed to add allmulti MAC(%pM) to vport[%d] DB\n",
794 mac, vport_num);
795 continue;
796 }
797 addr->vport = vport_num;
798 addr->action = MLX5_ACTION_ADD;
799 addr->mc_promisc = true;
800 }
801 }
802
803 /* Apply vport rx mode to HW FDB table */
804 static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u16 vport_num,
805 bool promisc, bool mc_promisc)
806 {
807 struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
808 struct esw_mc_addr *allmulti_addr = &esw->mc_promisc;
809
810 if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc)
811 goto promisc;
812
813 if (mc_promisc) {
814 vport->allmulti_rule =
815 esw_fdb_set_vport_allmulti_rule(esw, vport_num);
816 if (!allmulti_addr->uplink_rule)
817 allmulti_addr->uplink_rule =
818 esw_fdb_set_vport_allmulti_rule(esw,
819 MLX5_VPORT_UPLINK);
820 allmulti_addr->refcnt++;
821 } else if (vport->allmulti_rule) {
822 mlx5_del_flow_rules(vport->allmulti_rule);
823 vport->allmulti_rule = NULL;
824
825 if (--allmulti_addr->refcnt > 0)
826 goto promisc;
827
828 if (allmulti_addr->uplink_rule)
829 mlx5_del_flow_rules(allmulti_addr->uplink_rule);
830 allmulti_addr->uplink_rule = NULL;
831 }
832
833 promisc:
834 if (IS_ERR_OR_NULL(vport->promisc_rule) != promisc)
835 return;
836
837 if (promisc) {
838 vport->promisc_rule = esw_fdb_set_vport_promisc_rule(esw,
839 vport_num);
840 } else if (vport->promisc_rule) {
841 mlx5_del_flow_rules(vport->promisc_rule);
842 vport->promisc_rule = NULL;
843 }
844 }
845
846 /* Sync vport rx mode from vport context */
847 static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u16 vport_num)
848 {
849 struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
850 int promisc_all = 0;
851 int promisc_uc = 0;
852 int promisc_mc = 0;
853 int err;
854
855 err = mlx5_query_nic_vport_promisc(esw->dev,
856 vport_num,
857 &promisc_uc,
858 &promisc_mc,
859 &promisc_all);
860 if (err)
861 return;
862 esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
863 vport_num, promisc_all, promisc_mc);
864
865 if (!vport->info.trusted || !vport->enabled) {
866 promisc_uc = 0;
867 promisc_mc = 0;
868 promisc_all = 0;
869 }
870
871 esw_apply_vport_rx_mode(esw, vport_num, promisc_all,
872 (promisc_all || promisc_mc));
873 }
874
875 static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
876 {
877 struct mlx5_core_dev *dev = vport->dev;
878 struct mlx5_eswitch *esw = dev->priv.eswitch;
879 u8 mac[ETH_ALEN];
880
881 mlx5_query_nic_vport_mac_address(dev, vport->vport, mac);
882 esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
883 vport->vport, mac);
884
885 if (vport->enabled_events & UC_ADDR_CHANGE) {
886 esw_update_vport_addr_list(esw, vport->vport,
887 MLX5_NVPRT_LIST_TYPE_UC);
888 esw_apply_vport_addr_list(esw, vport->vport,
889 MLX5_NVPRT_LIST_TYPE_UC);
890 }
891
892 if (vport->enabled_events & MC_ADDR_CHANGE) {
893 esw_update_vport_addr_list(esw, vport->vport,
894 MLX5_NVPRT_LIST_TYPE_MC);
895 }
896
897 if (vport->enabled_events & PROMISC_CHANGE) {
898 esw_update_vport_rx_mode(esw, vport->vport);
899 if (!IS_ERR_OR_NULL(vport->allmulti_rule))
900 esw_update_vport_mc_promisc(esw, vport->vport);
901 }
902
903 if (vport->enabled_events & (PROMISC_CHANGE | MC_ADDR_CHANGE)) {
904 esw_apply_vport_addr_list(esw, vport->vport,
905 MLX5_NVPRT_LIST_TYPE_MC);
906 }
907
908 esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
909 if (vport->enabled)
910 arm_vport_context_events_cmd(dev, vport->vport,
911 vport->enabled_events);
912 }
913
914 static void esw_vport_change_handler(struct work_struct *work)
915 {
916 struct mlx5_vport *vport =
917 container_of(work, struct mlx5_vport, vport_change_handler);
918 struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
919
920 mutex_lock(&esw->state_lock);
921 esw_vport_change_handle_locked(vport);
922 mutex_unlock(&esw->state_lock);
923 }
924
925 static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
926 struct mlx5_vport *vport)
927 {
928 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
929 struct mlx5_flow_group *vlan_grp = NULL;
930 struct mlx5_flow_group *drop_grp = NULL;
931 struct mlx5_core_dev *dev = esw->dev;
932 struct mlx5_flow_namespace *root_ns;
933 struct mlx5_flow_table *acl;
934 void *match_criteria;
935 u32 *flow_group_in;
936 /* The egress acl table contains 2 rules:
937 * 1)Allow traffic with vlan_tag=vst_vlan_id
938 * 2)Drop all other traffic.
939 */
940 int table_size = 2;
941 int err = 0;
942
943 if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
944 return -EOPNOTSUPP;
945
946 if (!IS_ERR_OR_NULL(vport->egress.acl))
947 return 0;
948
949 esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
950 vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));
951
952 root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS,
953 vport->vport);
954 if (!root_ns) {
955 esw_warn(dev, "Failed to get E-Switch egress flow namespace for vport (%d)\n", vport->vport);
956 return -EOPNOTSUPP;
957 }
958
959 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
960 if (!flow_group_in)
961 return -ENOMEM;
962
963 acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
964 if (IS_ERR(acl)) {
965 err = PTR_ERR(acl);
966 esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
967 vport->vport, err);
968 goto out;
969 }
970
971 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
972 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
973 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
974 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
975 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
976 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
977
978 vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
979 if (IS_ERR(vlan_grp)) {
980 err = PTR_ERR(vlan_grp);
981 esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
982 vport->vport, err);
983 goto out;
984 }
985
986 memset(flow_group_in, 0, inlen);
987 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
988 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
989 drop_grp = mlx5_create_flow_group(acl, flow_group_in);
990 if (IS_ERR(drop_grp)) {
991 err = PTR_ERR(drop_grp);
992 esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
993 vport->vport, err);
994 goto out;
995 }
996
997 vport->egress.acl = acl;
998 vport->egress.drop_grp = drop_grp;
999 vport->egress.allowed_vlans_grp = vlan_grp;
1000 out:
1001 kvfree(flow_group_in);
1002 if (err && !IS_ERR_OR_NULL(vlan_grp))
1003 mlx5_destroy_flow_group(vlan_grp);
1004 if (err && !IS_ERR_OR_NULL(acl))
1005 mlx5_destroy_flow_table(acl);
1006 return err;
1007 }
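/* Resulting egress ACL layout (for reference): group 0 (cvlan_tag +
 * first_vid) holds the allowed-VLAN rule and group 1 holds the
 * unconditional drop rule; both rules are installed later by
 * esw_vport_egress_config().
 */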
1008
1009 static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
1010 struct mlx5_vport *vport)
1011 {
1012 if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan))
1013 mlx5_del_flow_rules(vport->egress.allowed_vlan);
1014
1015 if (!IS_ERR_OR_NULL(vport->egress.drop_rule))
1016 mlx5_del_flow_rules(vport->egress.drop_rule);
1017
1018 vport->egress.allowed_vlan = NULL;
1019 vport->egress.drop_rule = NULL;
1020 }
1021
1022 static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
1023 struct mlx5_vport *vport)
1024 {
1025 if (IS_ERR_OR_NULL(vport->egress.acl))
1026 return;
1027
1028 esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport);
1029
1030 esw_vport_cleanup_egress_rules(esw, vport);
1031 mlx5_destroy_flow_group(vport->egress.allowed_vlans_grp);
1032 mlx5_destroy_flow_group(vport->egress.drop_grp);
1033 mlx5_destroy_flow_table(vport->egress.acl);
1034 vport->egress.allowed_vlans_grp = NULL;
1035 vport->egress.drop_grp = NULL;
1036 vport->egress.acl = NULL;
1037 }
1038
1039 static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
1040 struct mlx5_vport *vport)
1041 {
1042 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1043 struct mlx5_core_dev *dev = esw->dev;
1044 struct mlx5_flow_namespace *root_ns;
1045 struct mlx5_flow_table *acl;
1046 struct mlx5_flow_group *g;
1047 void *match_criteria;
1048 u32 *flow_group_in;
1049 /* The ingress acl table contains 4 groups
1050 * (2 active rules at the same time -
1051 * 1 allow rule from one of the first 3 groups.
1052 * 1 drop rule from the last group):
1053 * 1)Allow untagged traffic with smac=original mac.
1054 * 2)Allow untagged traffic.
1055 * 3)Allow traffic with smac=original mac.
1056 * 4)Drop all other traffic.
1057 */
1058 int table_size = 4;
1059 int err = 0;
1060
1061 if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
1062 return -EOPNOTSUPP;
1063
1064 if (!IS_ERR_OR_NULL(vport->ingress.acl))
1065 return 0;
1066
1067 esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
1068 vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
1069
1070 root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
1071 vport->vport);
1072 if (!root_ns) {
1073 esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n", vport->vport);
1074 return -EOPNOTSUPP;
1075 }
1076
1077 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
1078 if (!flow_group_in)
1079 return -ENOMEM;
1080
1081 acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
1082 if (IS_ERR(acl)) {
1083 err = PTR_ERR(acl);
1084 esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
1085 vport->vport, err);
1086 goto out;
1087 }
1088 vport->ingress.acl = acl;
1089
1090 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
1091
1092 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1093 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
1094 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
1095 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
1096 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
1097 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
1098
1099 g = mlx5_create_flow_group(acl, flow_group_in);
1100 if (IS_ERR(g)) {
1101 err = PTR_ERR(g);
1102 esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
1103 vport->vport, err);
1104 goto out;
1105 }
1106 vport->ingress.allow_untagged_spoofchk_grp = g;
1107
1108 memset(flow_group_in, 0, inlen);
1109 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1110 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
1111 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
1112 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
1113
1114 g = mlx5_create_flow_group(acl, flow_group_in);
1115 if (IS_ERR(g)) {
1116 err = PTR_ERR(g);
1117 esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
1118 vport->vport, err);
1119 goto out;
1120 }
1121 vport->ingress.allow_untagged_only_grp = g;
1122
1123 memset(flow_group_in, 0, inlen);
1124 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1125 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
1126 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
1127 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
1128 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);
1129
1130 g = mlx5_create_flow_group(acl, flow_group_in);
1131 if (IS_ERR(g)) {
1132 err = PTR_ERR(g);
1133 esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
1134 vport->vport, err);
1135 goto out;
1136 }
1137 vport->ingress.allow_spoofchk_only_grp = g;
1138
1139 memset(flow_group_in, 0, inlen);
1140 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
1141 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);
1142
1143 g = mlx5_create_flow_group(acl, flow_group_in);
1144 if (IS_ERR(g)) {
1145 err = PTR_ERR(g);
1146 esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
1147 vport->vport, err);
1148 goto out;
1149 }
1150 vport->ingress.drop_grp = g;
1151
1152 out:
1153 if (err) {
1154 if (!IS_ERR_OR_NULL(vport->ingress.allow_spoofchk_only_grp))
1155 mlx5_destroy_flow_group(
1156 vport->ingress.allow_spoofchk_only_grp);
1157 if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_only_grp))
1158 mlx5_destroy_flow_group(
1159 vport->ingress.allow_untagged_only_grp);
1160 if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_spoofchk_grp))
1161 mlx5_destroy_flow_group(
1162 vport->ingress.allow_untagged_spoofchk_grp);
1163 if (!IS_ERR_OR_NULL(vport->ingress.acl))
1164 mlx5_destroy_flow_table(vport->ingress.acl);
1165 }
1166
1167 kvfree(flow_group_in);
1168 return err;
1169 }
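/* Resulting ingress ACL layout (for reference): group 0 matches cvlan_tag +
 * smac, group 1 cvlan_tag only, group 2 smac only and group 3 matches
 * nothing (unconditional drop). esw_vport_ingress_config() installs one
 * allow rule, placed into whichever of the first three groups fits its
 * match criteria, plus the drop rule.
 */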
1170
1171 static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
1172 struct mlx5_vport *vport)
1173 {
1174 if (!IS_ERR_OR_NULL(vport->ingress.drop_rule))
1175 mlx5_del_flow_rules(vport->ingress.drop_rule);
1176
1177 if (!IS_ERR_OR_NULL(vport->ingress.allow_rule))
1178 mlx5_del_flow_rules(vport->ingress.allow_rule);
1179
1180 vport->ingress.drop_rule = NULL;
1181 vport->ingress.allow_rule = NULL;
1182 }
1183
1184 static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
1185 struct mlx5_vport *vport)
1186 {
1187 if (IS_ERR_OR_NULL(vport->ingress.acl))
1188 return;
1189
1190 esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);
1191
1192 esw_vport_cleanup_ingress_rules(esw, vport);
1193 mlx5_destroy_flow_group(vport->ingress.allow_spoofchk_only_grp);
1194 mlx5_destroy_flow_group(vport->ingress.allow_untagged_only_grp);
1195 mlx5_destroy_flow_group(vport->ingress.allow_untagged_spoofchk_grp);
1196 mlx5_destroy_flow_group(vport->ingress.drop_grp);
1197 mlx5_destroy_flow_table(vport->ingress.acl);
1198 vport->ingress.acl = NULL;
1199 vport->ingress.drop_grp = NULL;
1200 vport->ingress.allow_spoofchk_only_grp = NULL;
1201 vport->ingress.allow_untagged_only_grp = NULL;
1202 vport->ingress.allow_untagged_spoofchk_grp = NULL;
1203 }
1204
1205 static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
1206 struct mlx5_vport *vport)
1207 {
1208 struct mlx5_fc *counter = vport->ingress.drop_counter;
1209 struct mlx5_flow_destination drop_ctr_dst = {0};
1210 struct mlx5_flow_destination *dst = NULL;
1211 struct mlx5_flow_act flow_act = {0};
1212 struct mlx5_flow_spec *spec;
1213 int dest_num = 0;
1214 int err = 0;
1215 u8 *smac_v;
1216
1217 esw_vport_cleanup_ingress_rules(esw, vport);
1218
1219 if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
1220 esw_vport_disable_ingress_acl(esw, vport);
1221 return 0;
1222 }
1223
1224 err = esw_vport_enable_ingress_acl(esw, vport);
1225 if (err) {
1226 mlx5_core_warn(esw->dev,
1227 "failed to enable ingress acl (%d) on vport[%d]\n",
1228 err, vport->vport);
1229 return err;
1230 }
1231
1232 esw_debug(esw->dev,
1233 "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
1234 vport->vport, vport->info.vlan, vport->info.qos);
1235
1236 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1237 if (!spec) {
1238 err = -ENOMEM;
1239 goto out;
1240 }
1241
1242 if (vport->info.vlan || vport->info.qos)
1243 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
1244
1245 if (vport->info.spoofchk) {
1246 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
1247 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0);
1248 smac_v = MLX5_ADDR_OF(fte_match_param,
1249 spec->match_value,
1250 outer_headers.smac_47_16);
1251 ether_addr_copy(smac_v, vport->info.mac);
1252 }
1253
1254 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1255 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
1256 vport->ingress.allow_rule =
1257 mlx5_add_flow_rules(vport->ingress.acl, spec,
1258 &flow_act, NULL, 0);
1259 if (IS_ERR(vport->ingress.allow_rule)) {
1260 err = PTR_ERR(vport->ingress.allow_rule);
1261 esw_warn(esw->dev,
1262 "vport[%d] configure ingress allow rule, err(%d)\n",
1263 vport->vport, err);
1264 vport->ingress.allow_rule = NULL;
1265 goto out;
1266 }
1267
1268 memset(spec, 0, sizeof(*spec));
1269 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
1270
1271 /* Attach drop flow counter */
1272 if (counter) {
1273 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
1274 drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1275 drop_ctr_dst.counter_id = mlx5_fc_id(counter);
1276 dst = &drop_ctr_dst;
1277 dest_num++;
1278 }
1279 vport->ingress.drop_rule =
1280 mlx5_add_flow_rules(vport->ingress.acl, spec,
1281 &flow_act, dst, dest_num);
1282 if (IS_ERR(vport->ingress.drop_rule)) {
1283 err = PTR_ERR(vport->ingress.drop_rule);
1284 esw_warn(esw->dev,
1285 "vport[%d] configure ingress drop rule, err(%d)\n",
1286 vport->vport, err);
1287 vport->ingress.drop_rule = NULL;
1288 goto out;
1289 }
1290
1291 out:
1292 if (err)
1293 esw_vport_cleanup_ingress_rules(esw, vport);
1294 kvfree(spec);
1295 return err;
1296 }
1297
1298 static int esw_vport_egress_config(struct mlx5_eswitch *esw,
1299 struct mlx5_vport *vport)
1300 {
1301 struct mlx5_fc *counter = vport->egress.drop_counter;
1302 struct mlx5_flow_destination drop_ctr_dst = {0};
1303 struct mlx5_flow_destination *dst = NULL;
1304 struct mlx5_flow_act flow_act = {0};
1305 struct mlx5_flow_spec *spec;
1306 int dest_num = 0;
1307 int err = 0;
1308
1309 esw_vport_cleanup_egress_rules(esw, vport);
1310
1311 if (!vport->info.vlan && !vport->info.qos) {
1312 esw_vport_disable_egress_acl(esw, vport);
1313 return 0;
1314 }
1315
1316 err = esw_vport_enable_egress_acl(esw, vport);
1317 if (err) {
1318 mlx5_core_warn(esw->dev,
1319 "failed to enable egress acl (%d) on vport[%d]\n",
1320 err, vport->vport);
1321 return err;
1322 }
1323
1324 esw_debug(esw->dev,
1325 "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
1326 vport->vport, vport->info.vlan, vport->info.qos);
1327
1328 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1329 if (!spec) {
1330 err = -ENOMEM;
1331 goto out;
1332 }
1333
1334 /* Allowed vlan rule */
1335 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
1336 MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
1337 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
1338 MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->info.vlan);
1339
1340 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1341 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
1342 vport->egress.allowed_vlan =
1343 mlx5_add_flow_rules(vport->egress.acl, spec,
1344 &flow_act, NULL, 0);
1345 if (IS_ERR(vport->egress.allowed_vlan)) {
1346 err = PTR_ERR(vport->egress.allowed_vlan);
1347 esw_warn(esw->dev,
1348 "vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
1349 vport->vport, err);
1350 vport->egress.allowed_vlan = NULL;
1351 goto out;
1352 }
1353
1354 /* Drop others rule (star rule) */
1355 memset(spec, 0, sizeof(*spec));
1356 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
1357
1358 /* Attach egress drop flow counter */
1359 if (counter) {
1360 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
1361 drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1362 drop_ctr_dst.counter_id = mlx5_fc_id(counter);
1363 dst = &drop_ctr_dst;
1364 dest_num++;
1365 }
1366 vport->egress.drop_rule =
1367 mlx5_add_flow_rules(vport->egress.acl, spec,
1368 &flow_act, dst, dest_num);
1369 if (IS_ERR(vport->egress.drop_rule)) {
1370 err = PTR_ERR(vport->egress.drop_rule);
1371 esw_warn(esw->dev,
1372 "vport[%d] configure egress drop rule failed, err(%d)\n",
1373 vport->vport, err);
1374 vport->egress.drop_rule = NULL;
1375 }
1376 out:
1377 kvfree(spec);
1378 return err;
1379 }
1380
1381 /* Vport QoS management */
1382 static int esw_create_tsar(struct mlx5_eswitch *esw)
1383 {
1384 u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
1385 struct mlx5_core_dev *dev = esw->dev;
1386 int err;
1387
1388 if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
1389 return 0;
1390
1391 if (esw->qos.enabled)
1392 return -EEXIST;
1393
1394 err = mlx5_create_scheduling_element_cmd(dev,
1395 SCHEDULING_HIERARCHY_E_SWITCH,
1396 tsar_ctx,
1397 &esw->qos.root_tsar_id);
1398 if (err) {
1399 esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err);
1400 return err;
1401 }
1402
1403 esw->qos.enabled = true;
1404 return 0;
1405 }
1406
1407 static void esw_destroy_tsar(struct mlx5_eswitch *esw)
1408 {
1409 int err;
1410
1411 if (!esw->qos.enabled)
1412 return;
1413
1414 err = mlx5_destroy_scheduling_element_cmd(esw->dev,
1415 SCHEDULING_HIERARCHY_E_SWITCH,
1416 esw->qos.root_tsar_id);
1417 if (err)
1418 esw_warn(esw->dev, "E-Switch destroy TSAR failed (%d)\n", err);
1419
1420 esw->qos.enabled = false;
1421 }
1422
1423 static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num,
1424 u32 initial_max_rate, u32 initial_bw_share)
1425 {
1426 struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
1427 u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
1428 struct mlx5_core_dev *dev = esw->dev;
1429 void *vport_elem;
1430 int err = 0;
1431
1432 if (!esw->qos.enabled || !MLX5_CAP_GEN(dev, qos) ||
1433 !MLX5_CAP_QOS(dev, esw_scheduling))
1434 return 0;
1435
1436 if (vport->qos.enabled)
1437 return -EEXIST;
1438
1439 MLX5_SET(scheduling_context, sched_ctx, element_type,
1440 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
1441 vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
1442 element_attributes);
1443 MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
1444 MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
1445 esw->qos.root_tsar_id);
1446 MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
1447 initial_max_rate);
1448 MLX5_SET(scheduling_context, sched_ctx, bw_share, initial_bw_share);
1449
1450 err = mlx5_create_scheduling_element_cmd(dev,
1451 SCHEDULING_HIERARCHY_E_SWITCH,
1452 sched_ctx,
1453 &vport->qos.esw_tsar_ix);
1454 if (err) {
1455 esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n",
1456 vport_num, err);
1457 return err;
1458 }
1459
1460 vport->qos.enabled = true;
1461 return 0;
1462 }
1463
1464 static void esw_vport_disable_qos(struct mlx5_eswitch *esw, int vport_num)
1465 {
1466 struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
1467 int err = 0;
1468
1469 if (!vport->qos.enabled)
1470 return;
1471
1472 err = mlx5_destroy_scheduling_element_cmd(esw->dev,
1473 SCHEDULING_HIERARCHY_E_SWITCH,
1474 vport->qos.esw_tsar_ix);
1475 if (err)
1476 esw_warn(esw->dev, "E-Switch destroy TSAR vport element failed (vport=%d,err=%d)\n",
1477 vport_num, err);
1478
1479 vport->qos.enabled = false;
1480 }
1481
1482 static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num,
1483 u32 max_rate, u32 bw_share)
1484 {
1485 struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
1486 u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
1487 struct mlx5_core_dev *dev = esw->dev;
1488 void *vport_elem;
1489 u32 bitmask = 0;
1490 int err = 0;
1491
1492 if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
1493 return -EOPNOTSUPP;
1494
1495 if (!vport->qos.enabled)
1496 return -EIO;
1497
1498 MLX5_SET(scheduling_context, sched_ctx, element_type,
1499 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
1500 vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
1501 element_attributes);
1502 MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
1503 MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
1504 esw->qos.root_tsar_id);
1505 MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
1506 max_rate);
1507 MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
1508 bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
1509 bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE;
1510
1511 err = mlx5_modify_scheduling_element_cmd(dev,
1512 SCHEDULING_HIERARCHY_E_SWITCH,
1513 sched_ctx,
1514 vport->qos.esw_tsar_ix,
1515 bitmask);
1516 if (err) {
1517 esw_warn(esw->dev, "E-Switch modify TSAR vport element failed (vport=%d,err=%d)\n",
1518 vport_num, err);
1519 return err;
1520 }
1521
1522 return 0;
1523 }
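/* QoS summary (for reference): esw_create_tsar() creates one root transmit
 * scheduling arbiter (TSAR) for the E-Switch, esw_vport_enable_qos() hangs a
 * per-vport scheduling element off it with the initial max_average_bw and
 * bw_share, and esw_vport_qos_config() modifies those two fields in place.
 */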
1524
1525 static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
1526 {
1527 ((u8 *)node_guid)[7] = mac[0];
1528 ((u8 *)node_guid)[6] = mac[1];
1529 ((u8 *)node_guid)[5] = mac[2];
1530 ((u8 *)node_guid)[4] = 0xff;
1531 ((u8 *)node_guid)[3] = 0xfe;
1532 ((u8 *)node_guid)[2] = mac[3];
1533 ((u8 *)node_guid)[1] = mac[4];
1534 ((u8 *)node_guid)[0] = mac[5];
1535 }
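/* Illustration with a hypothetical MAC: 00:11:22:33:44:55 maps to the EUI-64
 * style node GUID 00:11:22:ff:fe:33:44:55, i.e. the two MAC halves wrapped
 * around the ff:fe marker (mac[0] lands in the most significant GUID byte
 * on little-endian hosts).
 */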
1536
1537 static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
1538 struct mlx5_vport *vport)
1539 {
1540 int vport_num = vport->vport;
1541
1542 if (esw->manager_vport == vport_num)
1543 return;
1544
1545 mlx5_modify_vport_admin_state(esw->dev,
1546 MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
1547 vport_num, 1,
1548 vport->info.link_state);
1549
1550 /* Host PF has its own mac/guid. */
1551 if (vport_num) {
1552 mlx5_modify_nic_vport_mac_address(esw->dev, vport_num,
1553 vport->info.mac);
1554 mlx5_modify_nic_vport_node_guid(esw->dev, vport_num,
1555 vport->info.node_guid);
1556 }
1557
1558 modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, vport->info.qos,
1559 (vport->info.vlan || vport->info.qos));
1560
1561 /* Only legacy mode needs ACLs */
1562 if (esw->mode == SRIOV_LEGACY) {
1563 esw_vport_ingress_config(esw, vport);
1564 esw_vport_egress_config(esw, vport);
1565 }
1566 }
1567
1568 static void esw_vport_create_drop_counters(struct mlx5_vport *vport)
1569 {
1570 struct mlx5_core_dev *dev = vport->dev;
1571
1572 if (MLX5_CAP_ESW_INGRESS_ACL(dev, flow_counter)) {
1573 vport->ingress.drop_counter = mlx5_fc_create(dev, false);
1574 if (IS_ERR(vport->ingress.drop_counter)) {
1575 esw_warn(dev,
1576 "vport[%d] configure ingress drop rule counter failed\n",
1577 vport->vport);
1578 vport->ingress.drop_counter = NULL;
1579 }
1580 }
1581
1582 if (MLX5_CAP_ESW_EGRESS_ACL(dev, flow_counter)) {
1583 vport->egress.drop_counter = mlx5_fc_create(dev, false);
1584 if (IS_ERR(vport->egress.drop_counter)) {
1585 esw_warn(dev,
1586 "vport[%d] configure egress drop rule counter failed\n",
1587 vport->vport);
1588 vport->egress.drop_counter = NULL;
1589 }
1590 }
1591 }
1592
1593 static void esw_vport_destroy_drop_counters(struct mlx5_vport *vport)
1594 {
1595 struct mlx5_core_dev *dev = vport->dev;
1596
1597 if (vport->ingress.drop_counter)
1598 mlx5_fc_destroy(dev, vport->ingress.drop_counter);
1599 if (vport->egress.drop_counter)
1600 mlx5_fc_destroy(dev, vport->egress.drop_counter);
1601 }
1602
1603 static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
1604 int enable_events)
1605 {
1606 u16 vport_num = vport->vport;
1607
1608 mutex_lock(&esw->state_lock);
1609 WARN_ON(vport->enabled);
1610
1611 esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
1612
1613 /* Create steering drop counters for ingress and egress ACLs */
1614 if (vport_num && esw->mode == SRIOV_LEGACY)
1615 esw_vport_create_drop_counters(vport);
1616
1617 /* Restore old vport configuration */
1618 esw_apply_vport_conf(esw, vport);
1619
1620 /* Attach vport to the eswitch rate limiter */
1621 if (esw_vport_enable_qos(esw, vport_num, vport->info.max_rate,
1622 vport->qos.bw_share))
1623 esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter", vport_num);
1624
1625 /* Sync with current vport context */
1626 vport->enabled_events = enable_events;
1627 vport->enabled = true;
1628
1629 /* Esw manager is trusted by default. Host PF (vport 0) is trusted as well
1630 * in smartNIC as it's a vport group manager.
1631 */
1632 if (esw->manager_vport == vport_num ||
1633 (!vport_num && mlx5_core_is_ecpf(esw->dev)))
1634 vport->info.trusted = true;
1635
1636 esw_vport_change_handle_locked(vport);
1637
1638 esw->enabled_vports++;
1639 esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
1640 mutex_unlock(&esw->state_lock);
1641 }
1642
1643 static void esw_disable_vport(struct mlx5_eswitch *esw,
1644 struct mlx5_vport *vport)
1645 {
1646 u16 vport_num = vport->vport;
1647
1648 if (!vport->enabled)
1649 return;
1650
1651 esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
1652 /* Mark this vport as disabled to discard new events */
1653 vport->enabled = false;
1654
1655 /* Wait for current already scheduled events to complete */
1656 flush_workqueue(esw->work_queue);
1657 /* Disable events from this vport */
1658 arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
1659 mutex_lock(&esw->state_lock);
1660         /* We don't assume VFs will clean up after themselves.
1661          * Calling the vport change handler while the vport is disabled will clean up
1662 * the vport resources.
1663 */
1664 esw_vport_change_handle_locked(vport);
1665 vport->enabled_events = 0;
1666 esw_vport_disable_qos(esw, vport_num);
1667 if (esw->manager_vport != vport_num &&
1668 esw->mode == SRIOV_LEGACY) {
1669 mlx5_modify_vport_admin_state(esw->dev,
1670 MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
1671 vport_num, 1,
1672 MLX5_VPORT_ADMIN_STATE_DOWN);
1673 esw_vport_disable_egress_acl(esw, vport);
1674 esw_vport_disable_ingress_acl(esw, vport);
1675 esw_vport_destroy_drop_counters(vport);
1676 }
1677 esw->enabled_vports--;
1678 mutex_unlock(&esw->state_lock);
1679 }
1680
1681 static int eswitch_vport_event(struct notifier_block *nb,
1682 unsigned long type, void *data)
1683 {
1684 struct mlx5_eswitch *esw = mlx5_nb_cof(nb, struct mlx5_eswitch, nb);
1685 struct mlx5_eqe *eqe = data;
1686 struct mlx5_vport *vport;
1687 u16 vport_num;
1688
1689 vport_num = be16_to_cpu(eqe->data.vport_change.vport_num);
1690 vport = mlx5_eswitch_get_vport(esw, vport_num);
1691 if (vport->enabled)
1692 queue_work(esw->work_queue, &vport->vport_change_handler);
1693
1694 return NOTIFY_OK;
1695 }
1696
1697 /* Public E-Switch API */
1698 #define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
1699
1700 int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
1701 {
1702 int vf_nvports = 0, total_nvports = 0;
1703 struct mlx5_vport *vport;
1704 int err;
1705 int i, enabled_events;
1706
1707 if (!ESW_ALLOWED(esw) ||
1708 !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
1709 esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
1710 return -EOPNOTSUPP;
1711 }
1712
1713 if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
1714 esw_warn(esw->dev, "E-Switch ingress ACL is not supported by FW\n");
1715
1716 if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
1717                 esw_warn(esw->dev, "E-Switch egress ACL is not supported by FW\n");
1718
1719 esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d) mode (%d)\n", nvfs, mode);
1720
1721 if (mode == SRIOV_OFFLOADS) {
1722 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
1723 err = mlx5_query_host_params_num_vfs(esw->dev, &vf_nvports);
1724 if (err)
1725 return err;
1726 total_nvports = esw->total_vports;
1727 } else {
1728 vf_nvports = nvfs;
1729 total_nvports = nvfs + MLX5_SPECIAL_VPORTS(esw->dev);
1730 }
1731 }
1732
1733 esw->mode = mode;
1734
1735 mlx5_lag_update(esw->dev);
1736
1737 if (mode == SRIOV_LEGACY) {
1738 err = esw_create_legacy_table(esw);
1739 if (err)
1740 goto abort;
1741 } else {
1742 mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
1743 mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
1744 err = esw_offloads_init(esw, vf_nvports, total_nvports);
1745 }
1746
1747 if (err)
1748 goto abort;
1749
1750 err = esw_create_tsar(esw);
1751 if (err)
1752 esw_warn(esw->dev, "Failed to create eswitch TSAR");
1753
1754 /* Don't enable vport events when in SRIOV_OFFLOADS mode, since:
1755          * 1. L2 table (MPFS) is programmed by the PF/VF representor netdevs' set_rx_mode
1756 * 2. FDB/Eswitch is programmed by user space tools
1757 */
1758 enabled_events = (mode == SRIOV_LEGACY) ? SRIOV_VPORT_EVENTS : 0;
1759
1760 /* Enable PF vport */
1761 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
1762 esw_enable_vport(esw, vport, enabled_events);
1763
1764 /* Enable ECPF vports */
1765 if (mlx5_ecpf_vport_exists(esw->dev)) {
1766 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
1767 esw_enable_vport(esw, vport, enabled_events);
1768 }
1769
1770 /* Enable VF vports */
1771 mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs)
1772 esw_enable_vport(esw, vport, enabled_events);
1773
1774 if (mode == SRIOV_LEGACY) {
1775 MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
1776 mlx5_eq_notifier_register(esw->dev, &esw->nb);
1777 }
1778
1779 esw_info(esw->dev, "SRIOV enabled: active vports(%d)\n",
1780 esw->enabled_vports);
1781 return 0;
1782
1783 abort:
1784 esw->mode = SRIOV_NONE;
1785
1786 if (mode == SRIOV_OFFLOADS) {
1787 mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
1788 mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
1789 }
1790
1791 return err;
1792 }
1793
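/* Tear down what mlx5_eswitch_enable_sriov() set up: unregister the vport
 * change notifier (legacy mode), disable every vport, drop the multicast
 * promiscuous uplink rule, destroy the TSAR and the legacy FDB tables or
 * the offloads pipeline, then return the e-switch to SRIOV_NONE.
 */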
1794 void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
1795 {
1796 struct esw_mc_addr *mc_promisc;
1797 struct mlx5_vport *vport;
1798 int old_mode;
1799 int i;
1800
1801 if (!ESW_ALLOWED(esw) || esw->mode == SRIOV_NONE)
1802 return;
1803
1804 esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n",
1805 esw->enabled_vports, esw->mode);
1806
1807 mc_promisc = &esw->mc_promisc;
1808
1809 if (esw->mode == SRIOV_LEGACY)
1810 mlx5_eq_notifier_unregister(esw->dev, &esw->nb);
1811
1812 mlx5_esw_for_all_vports(esw, i, vport)
1813 esw_disable_vport(esw, vport);
1814
1815 if (mc_promisc && mc_promisc->uplink_rule)
1816 mlx5_del_flow_rules(mc_promisc->uplink_rule);
1817
1818 esw_destroy_tsar(esw);
1819
1820 if (esw->mode == SRIOV_LEGACY)
1821 esw_destroy_legacy_table(esw);
1822 else if (esw->mode == SRIOV_OFFLOADS)
1823 esw_offloads_cleanup(esw);
1824
1825 old_mode = esw->mode;
1826 esw->mode = SRIOV_NONE;
1827
1828 mlx5_lag_update(esw->dev);
1829
1830 if (old_mode == SRIOV_OFFLOADS) {
1831 mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
1832 mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
1833 }
1834 }
1835
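/* Allocate and initialize the per-device e-switch state: the work queue,
 * the vport array (one entry per vport, including PF/ECPF/VF/uplink), the
 * offloads representors, the encap/mod_hdr hash tables and the state lock.
 * A no-op on devices that are not vport managers.
 */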
1836 int mlx5_eswitch_init(struct mlx5_core_dev *dev)
1837 {
1838 int total_vports = MLX5_TOTAL_VPORTS(dev);
1839 struct mlx5_eswitch *esw;
1840 struct mlx5_vport *vport;
1841 int err, i;
1842
1843 if (!MLX5_VPORT_MANAGER(dev))
1844 return 0;
1845
1846 esw_info(dev,
1847 "Total vports %d, per vport: max uc(%d) max mc(%d)\n",
1848 total_vports,
1849 MLX5_MAX_UC_PER_VPORT(dev),
1850 MLX5_MAX_MC_PER_VPORT(dev));
1851
1852 esw = kzalloc(sizeof(*esw), GFP_KERNEL);
1853 if (!esw)
1854 return -ENOMEM;
1855
1856 esw->dev = dev;
1857 esw->manager_vport = mlx5_eswitch_manager_vport(dev);
1858
1859 esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
1860 if (!esw->work_queue) {
1861 err = -ENOMEM;
1862 goto abort;
1863 }
1864
1865 esw->vports = kcalloc(total_vports, sizeof(struct mlx5_vport),
1866 GFP_KERNEL);
1867 if (!esw->vports) {
1868 err = -ENOMEM;
1869 goto abort;
1870 }
1871
1872 esw->total_vports = total_vports;
1873
1874 err = esw_offloads_init_reps(esw);
1875 if (err)
1876 goto abort;
1877
1878 hash_init(esw->offloads.encap_tbl);
1879 hash_init(esw->offloads.mod_hdr_tbl);
1880 mutex_init(&esw->state_lock);
1881
1882 mlx5_esw_for_all_vports(esw, i, vport) {
1883 vport->vport = mlx5_eswitch_index_to_vport_num(esw, i);
1884 vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
1885 vport->dev = dev;
1886 INIT_WORK(&vport->vport_change_handler,
1887 esw_vport_change_handler);
1888 }
1889
1890 esw->enabled_vports = 0;
1891 esw->mode = SRIOV_NONE;
1892 esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
1893 if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) &&
1894 MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
1895 esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
1896 else
1897 esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
1898
1899 dev->priv.eswitch = esw;
1900 return 0;
1901 abort:
1902 if (esw->work_queue)
1903 destroy_workqueue(esw->work_queue);
1904 esw_offloads_cleanup_reps(esw);
1905 kfree(esw->vports);
1906 kfree(esw);
1907 return err;
1908 }
1909
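/* Counterpart of mlx5_eswitch_init(): release the work queue, the offloads
 * representors and the vport array, and detach the e-switch from the core
 * device.
 */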
1910 void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
1911 {
1912 if (!esw || !MLX5_VPORT_MANAGER(esw->dev))
1913 return;
1914
1915 esw_info(esw->dev, "cleanup\n");
1916
1917 esw->dev->priv.eswitch = NULL;
1918 destroy_workqueue(esw->work_queue);
1919 esw_offloads_cleanup_reps(esw);
1920 kfree(esw->vports);
1921 kfree(esw);
1922 }
1923
1924 /* Vport Administration */
1925 #define LEGAL_VPORT(esw, vport) ((vport) >= 0 && (vport) < (esw)->total_vports)
1926
1927 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
1928 int vport, u8 mac[ETH_ALEN])
1929 {
1930 struct mlx5_vport *evport;
1931 u64 node_guid;
1932 int err = 0;
1933
1934 if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
1935 return -EPERM;
1936 if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac))
1937 return -EINVAL;
1938
1939 mutex_lock(&esw->state_lock);
1940 evport = &esw->vports[vport];
1941
1942 if (evport->info.spoofchk && !is_valid_ether_addr(mac))
1943 mlx5_core_warn(esw->dev,
1944 "Set invalid MAC while spoofchk is on, vport(%d)\n",
1945 vport);
1946
1947 err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
1948 if (err) {
1949 mlx5_core_warn(esw->dev,
1950 "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n",
1951 vport, err);
1952 goto unlock;
1953 }
1954
1955 node_guid_gen_from_mac(&node_guid, mac);
1956 err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid);
1957 if (err)
1958 mlx5_core_warn(esw->dev,
1959 "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
1960 vport, err);
1961
1962 ether_addr_copy(evport->info.mac, mac);
1963 evport->info.node_guid = node_guid;
1964 if (evport->enabled && esw->mode == SRIOV_LEGACY)
1965 err = esw_vport_ingress_config(esw, evport);
1966
1967 unlock:
1968 mutex_unlock(&esw->state_lock);
1969 return err;
1970 }
1971
1972 int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
1973 int vport, int link_state)
1974 {
1975 struct mlx5_vport *evport;
1976 int err = 0;
1977
1978 if (!ESW_ALLOWED(esw))
1979 return -EPERM;
1980 if (!LEGAL_VPORT(esw, vport))
1981 return -EINVAL;
1982
1983 mutex_lock(&esw->state_lock);
1984 evport = &esw->vports[vport];
1985
1986 err = mlx5_modify_vport_admin_state(esw->dev,
1987 MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
1988 vport, 1, link_state);
1989 if (err) {
1990 mlx5_core_warn(esw->dev,
1991 "Failed to set vport %d link state, err = %d\n",
1992 vport, err);
1993 goto unlock;
1994 }
1995
1996 evport->info.link_state = link_state;
1997
1998 unlock:
1999 mutex_unlock(&esw->state_lock);
2000 return err;
2001 }
2002
2003 int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
2004 int vport, struct ifla_vf_info *ivi)
2005 {
2006 struct mlx5_vport *evport;
2007
2008 if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
2009 return -EPERM;
2010 if (!LEGAL_VPORT(esw, vport))
2011 return -EINVAL;
2012
2013 evport = &esw->vports[vport];
2014
2015 memset(ivi, 0, sizeof(*ivi));
2016 ivi->vf = vport - 1;
2017
2018 mutex_lock(&esw->state_lock);
2019 ether_addr_copy(ivi->mac, evport->info.mac);
2020 ivi->linkstate = evport->info.link_state;
2021 ivi->vlan = evport->info.vlan;
2022 ivi->qos = evport->info.qos;
2023 ivi->spoofchk = evport->info.spoofchk;
2024 ivi->trusted = evport->info.trusted;
2025 ivi->min_tx_rate = evport->info.min_rate;
2026 ivi->max_tx_rate = evport->info.max_rate;
2027 mutex_unlock(&esw->state_lock);
2028
2029 return 0;
2030 }
2031
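/* Program the vport's C-VLAN insert/strip (VST) settings and, in legacy
 * mode, refresh the ingress/egress ACLs that enforce them; @set_flags
 * selects whether VLAN insert and/or strip is being modified.
 */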
2032 int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
2033 int vport, u16 vlan, u8 qos, u8 set_flags)
2034 {
2035 struct mlx5_vport *evport;
2036 int err = 0;
2037
2038 if (!ESW_ALLOWED(esw))
2039 return -EPERM;
2040 if (!LEGAL_VPORT(esw, vport) || (vlan > 4095) || (qos > 7))
2041 return -EINVAL;
2042
2043 mutex_lock(&esw->state_lock);
2044 evport = &esw->vports[vport];
2045
2046 err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
2047 if (err)
2048 goto unlock;
2049
2050 evport->info.vlan = vlan;
2051 evport->info.qos = qos;
2052 if (evport->enabled && esw->mode == SRIOV_LEGACY) {
2053 err = esw_vport_ingress_config(esw, evport);
2054 if (err)
2055 goto unlock;
2056 err = esw_vport_egress_config(esw, evport);
2057 }
2058
2059 unlock:
2060 mutex_unlock(&esw->state_lock);
2061 return err;
2062 }
2063
2064 int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
2065 int vport, u16 vlan, u8 qos)
2066 {
2067 u8 set_flags = 0;
2068
2069 if (vlan || qos)
2070 set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;
2071
2072 return __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);
2073 }
2074
2075 int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
2076 int vport, bool spoofchk)
2077 {
2078 struct mlx5_vport *evport;
2079 bool pschk;
2080 int err = 0;
2081
2082 if (!ESW_ALLOWED(esw))
2083 return -EPERM;
2084 if (!LEGAL_VPORT(esw, vport))
2085 return -EINVAL;
2086
2087 mutex_lock(&esw->state_lock);
2088 evport = &esw->vports[vport];
2089 pschk = evport->info.spoofchk;
2090 evport->info.spoofchk = spoofchk;
2091 if (pschk && !is_valid_ether_addr(evport->info.mac))
2092 mlx5_core_warn(esw->dev,
2093 "Spoofchk is set while MAC is invalid, vport(%d)\n",
2094 evport->vport);
2095 if (evport->enabled && esw->mode == SRIOV_LEGACY)
2096 err = esw_vport_ingress_config(esw, evport);
2097 if (err)
2098 evport->info.spoofchk = pschk;
2099 mutex_unlock(&esw->state_lock);
2100
2101 return err;
2102 }
2103
2104 static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw)
2105 {
2106 if (esw->fdb_table.legacy.vepa_uplink_rule)
2107 mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_uplink_rule);
2108
2109 if (esw->fdb_table.legacy.vepa_star_rule)
2110 mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_star_rule);
2111
2112 esw->fdb_table.legacy.vepa_uplink_rule = NULL;
2113 esw->fdb_table.legacy.vepa_star_rule = NULL;
2114 }
2115
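/* Install (setting != 0) or remove (setting == 0) the pair of VEPA rules in
 * the legacy vepa_fdb table: one rule steering traffic sourced from the
 * uplink vport into the regular legacy FDB, and a catch-all rule forwarding
 * everything else back out of the uplink vport. Must be called with
 * esw->state_lock held.
 */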
2116 static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
2117 u8 setting)
2118 {
2119 struct mlx5_flow_destination dest = {};
2120 struct mlx5_flow_act flow_act = {};
2121 struct mlx5_flow_handle *flow_rule;
2122 struct mlx5_flow_spec *spec;
2123 int err = 0;
2124 void *misc;
2125
2126 if (!setting) {
2127 esw_cleanup_vepa_rules(esw);
2128 return 0;
2129 }
2130
2131 if (esw->fdb_table.legacy.vepa_uplink_rule)
2132 return 0;
2133
2134 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
2135 if (!spec)
2136 return -ENOMEM;
2137
2138 /* Uplink rule forwards uplink traffic to the FDB */
2139 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
2140 MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK);
2141
2142 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
2143 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
2144
2145 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
2146 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
2147 dest.ft = esw->fdb_table.legacy.fdb;
2148 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2149 flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec,
2150 &flow_act, &dest, 1);
2151 if (IS_ERR(flow_rule)) {
2152 err = PTR_ERR(flow_rule);
2153 goto out;
2154 } else {
2155 esw->fdb_table.legacy.vepa_uplink_rule = flow_rule;
2156 }
2157
2158 /* Star rule to forward all traffic to uplink vport */
2159 memset(spec, 0, sizeof(*spec));
2160 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
2161 dest.vport.num = MLX5_VPORT_UPLINK;
2162 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2163 flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec,
2164 &flow_act, &dest, 1);
2165 if (IS_ERR(flow_rule)) {
2166 err = PTR_ERR(flow_rule);
2167 goto out;
2168 } else {
2169 esw->fdb_table.legacy.vepa_star_rule = flow_rule;
2170 }
2171
2172 out:
2173 kvfree(spec);
2174 if (err)
2175 esw_cleanup_vepa_rules(esw);
2176 return err;
2177 }
2178
2179 int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting)
2180 {
2181 int err = 0;
2182
2183 if (!esw)
2184 return -EOPNOTSUPP;
2185
2186 if (!ESW_ALLOWED(esw))
2187 return -EPERM;
2188
2189 mutex_lock(&esw->state_lock);
2190 if (esw->mode != SRIOV_LEGACY) {
2191 err = -EOPNOTSUPP;
2192 goto out;
2193 }
2194
2195 err = _mlx5_eswitch_set_vepa_locked(esw, setting);
2196
2197 out:
2198 mutex_unlock(&esw->state_lock);
2199 return err;
2200 }
2201
2202 int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
2203 {
2204 int err = 0;
2205
2206 if (!esw)
2207 return -EOPNOTSUPP;
2208
2209 if (!ESW_ALLOWED(esw))
2210 return -EPERM;
2211
2212 mutex_lock(&esw->state_lock);
2213 if (esw->mode != SRIOV_LEGACY) {
2214 err = -EOPNOTSUPP;
2215 goto out;
2216 }
2217
2218 *setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0;
2219
2220 out:
2221 mutex_unlock(&esw->state_lock);
2222 return err;
2223 }
2224
2225 int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
2226 int vport, bool setting)
2227 {
2228 struct mlx5_vport *evport;
2229
2230 if (!ESW_ALLOWED(esw))
2231 return -EPERM;
2232 if (!LEGAL_VPORT(esw, vport))
2233 return -EINVAL;
2234
2235 mutex_lock(&esw->state_lock);
2236 evport = &esw->vports[vport];
2237 evport->info.trusted = setting;
2238 if (evport->enabled)
2239 esw_vport_change_handle_locked(evport);
2240 mutex_unlock(&esw->state_lock);
2241
2242 return 0;
2243 }
2244
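/* Return the divider used to translate per-vport min_rate values into TSAR
 * bw_share units: the highest min_rate guarantee among enabled vports
 * divided by the firmware's max_tsar_bw_share, clamped to at least 1.
 */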
2245 static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
2246 {
2247 u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
2248 struct mlx5_vport *evport;
2249 u32 max_guarantee = 0;
2250 int i;
2251
2252 mlx5_esw_for_all_vports(esw, i, evport) {
2253 if (!evport->enabled || evport->info.min_rate < max_guarantee)
2254 continue;
2255 max_guarantee = evport->info.min_rate;
2256 }
2257
2258 return max_t(u32, max_guarantee / fw_max_bw_share, 1);
2259 }
2260
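/* Recompute the TSAR bw_share of every enabled vport from its min_rate and
 * the shared @divider, pushing any changed value to firmware via
 * esw_vport_qos_config(); the first failure aborts and is returned.
 *
 * Illustrative numbers (assuming MLX5_RATE_TO_BW_SHARE divides the rate by
 * the divider and clamps the result to [MLX5_MIN_BW_SHARE,
 * max_tsar_bw_share]): with max_tsar_bw_share = 100 and min_rates of 1000
 * and 4000, the divider above is max(4000 / 100, 1) = 40, giving bw_share
 * values of 25 and 100.
 */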
2261 static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
2262 {
2263 u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
2264 struct mlx5_vport *evport;
2265 u32 vport_max_rate;
2266 u32 vport_min_rate;
2267 u32 bw_share;
2268 int err;
2269 int i;
2270
2271 mlx5_esw_for_all_vports(esw, i, evport) {
2272 if (!evport->enabled)
2273 continue;
2274 vport_min_rate = evport->info.min_rate;
2275 vport_max_rate = evport->info.max_rate;
2276 bw_share = MLX5_MIN_BW_SHARE;
2277
2278 if (vport_min_rate)
2279 bw_share = MLX5_RATE_TO_BW_SHARE(vport_min_rate,
2280 divider,
2281 fw_max_bw_share);
2282
2283 if (bw_share == evport->qos.bw_share)
2284 continue;
2285
2286 err = esw_vport_qos_config(esw, evport->vport, vport_max_rate,
2287 bw_share);
2288 if (!err)
2289 evport->qos.bw_share = bw_share;
2290 else
2291 return err;
2292 }
2293
2294 return 0;
2295 }
2296
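/* Apply min/max TX rate limits to a vport: min_rate becomes a TSAR bw_share
 * guarantee (renormalizing all other enabled vports), max_rate becomes a
 * hard rate limit. Either limit is rejected with -EOPNOTSUPP if the
 * firmware lacks the corresponding QoS capability.
 */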
2297 int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport,
2298 u32 max_rate, u32 min_rate)
2299 {
2300 struct mlx5_vport *evport;
2301 u32 fw_max_bw_share;
2302 u32 previous_min_rate;
2303 u32 divider;
2304 bool min_rate_supported;
2305 bool max_rate_supported;
2306 int err = 0;
2307
2308 if (!ESW_ALLOWED(esw))
2309 return -EPERM;
2310 if (!LEGAL_VPORT(esw, vport))
2311 return -EINVAL;
2312
2313 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
2314 min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
2315 fw_max_bw_share >= MLX5_MIN_BW_SHARE;
2316 max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit);
2317
2318 if ((min_rate && !min_rate_supported) || (max_rate && !max_rate_supported))
2319 return -EOPNOTSUPP;
2320
2321 mutex_lock(&esw->state_lock);
2322 evport = &esw->vports[vport];
2323
2324 if (min_rate == evport->info.min_rate)
2325 goto set_max_rate;
2326
2327 previous_min_rate = evport->info.min_rate;
2328 evport->info.min_rate = min_rate;
2329 divider = calculate_vports_min_rate_divider(esw);
2330 err = normalize_vports_min_rate(esw, divider);
2331 if (err) {
2332 evport->info.min_rate = previous_min_rate;
2333 goto unlock;
2334 }
2335
2336 set_max_rate:
2337 if (max_rate == evport->info.max_rate)
2338 goto unlock;
2339
2340 err = esw_vport_qos_config(esw, vport, max_rate, evport->qos.bw_share);
2341 if (!err)
2342 evport->info.max_rate = max_rate;
2343
2344 unlock:
2345 mutex_unlock(&esw->state_lock);
2346 return err;
2347 }
2348
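/* Collect RX/TX drop counters for a legacy-mode vport: packets dropped by
 * the vport's egress/ingress ACL drop counters (reported as RX/TX drops
 * from the VF's point of view) plus the firmware's "discard while vport
 * down" counters when those capabilities are present.
 */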
2349 static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
2350 int vport_idx,
2351 struct mlx5_vport_drop_stats *stats)
2352 {
2353 struct mlx5_eswitch *esw = dev->priv.eswitch;
2354 struct mlx5_vport *vport = &esw->vports[vport_idx];
2355 u64 rx_discard_vport_down, tx_discard_vport_down;
2356 u64 bytes = 0;
2357 int err = 0;
2358
2359 if (!vport->enabled || esw->mode != SRIOV_LEGACY)
2360 return 0;
2361
2362 if (vport->egress.drop_counter)
2363 mlx5_fc_query(dev, vport->egress.drop_counter,
2364 &stats->rx_dropped, &bytes);
2365
2366 if (vport->ingress.drop_counter)
2367 mlx5_fc_query(dev, vport->ingress.drop_counter,
2368 &stats->tx_dropped, &bytes);
2369
2370 if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) &&
2371 !MLX5_CAP_GEN(dev, transmit_discard_vport_down))
2372 return 0;
2373
2374 err = mlx5_query_vport_down_stats(dev, vport_idx, 1,
2375 &rx_discard_vport_down,
2376 &tx_discard_vport_down);
2377 if (err)
2378 return err;
2379
2380 if (MLX5_CAP_GEN(dev, receive_discard_vport_down))
2381 stats->rx_dropped += rx_discard_vport_down;
2382 if (MLX5_CAP_GEN(dev, transmit_discard_vport_down))
2383 stats->tx_dropped += tx_discard_vport_down;
2384
2385 return 0;
2386 }
2387
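/* Fill @vf_stats for "other vport" @vport from the QUERY_VPORT_COUNTER
 * command output (unicast/multicast/broadcast packet and octet counters for
 * both Ethernet and IB) and append the drop statistics gathered by
 * mlx5_eswitch_query_vport_drop_stats().
 */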
2388 int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
2389 int vport,
2390 struct ifla_vf_stats *vf_stats)
2391 {
2392 int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
2393 u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
2394 struct mlx5_vport_drop_stats stats = {0};
2395 int err = 0;
2396 u32 *out;
2397
2398 if (!ESW_ALLOWED(esw))
2399 return -EPERM;
2400 if (!LEGAL_VPORT(esw, vport))
2401 return -EINVAL;
2402
2403 out = kvzalloc(outlen, GFP_KERNEL);
2404 if (!out)
2405 return -ENOMEM;
2406
2407 MLX5_SET(query_vport_counter_in, in, opcode,
2408 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
2409 MLX5_SET(query_vport_counter_in, in, op_mod, 0);
2410 MLX5_SET(query_vport_counter_in, in, vport_number, vport);
2411 MLX5_SET(query_vport_counter_in, in, other_vport, 1);
2412
2413 memset(out, 0, outlen);
2414 err = mlx5_cmd_exec(esw->dev, in, sizeof(in), out, outlen);
2415 if (err)
2416 goto free_out;
2417
2418 #define MLX5_GET_CTR(p, x) \
2419 MLX5_GET64(query_vport_counter_out, p, x)
2420
2421 memset(vf_stats, 0, sizeof(*vf_stats));
2422 vf_stats->rx_packets =
2423 MLX5_GET_CTR(out, received_eth_unicast.packets) +
2424 MLX5_GET_CTR(out, received_ib_unicast.packets) +
2425 MLX5_GET_CTR(out, received_eth_multicast.packets) +
2426 MLX5_GET_CTR(out, received_ib_multicast.packets) +
2427 MLX5_GET_CTR(out, received_eth_broadcast.packets);
2428
2429 vf_stats->rx_bytes =
2430 MLX5_GET_CTR(out, received_eth_unicast.octets) +
2431 MLX5_GET_CTR(out, received_ib_unicast.octets) +
2432 MLX5_GET_CTR(out, received_eth_multicast.octets) +
2433 MLX5_GET_CTR(out, received_ib_multicast.octets) +
2434 MLX5_GET_CTR(out, received_eth_broadcast.octets);
2435
2436 vf_stats->tx_packets =
2437 MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
2438 MLX5_GET_CTR(out, transmitted_ib_unicast.packets) +
2439 MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
2440 MLX5_GET_CTR(out, transmitted_ib_multicast.packets) +
2441 MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
2442
2443 vf_stats->tx_bytes =
2444 MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
2445 MLX5_GET_CTR(out, transmitted_ib_unicast.octets) +
2446 MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
2447 MLX5_GET_CTR(out, transmitted_ib_multicast.octets) +
2448 MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
2449
2450 vf_stats->multicast =
2451 MLX5_GET_CTR(out, received_eth_multicast.packets) +
2452 MLX5_GET_CTR(out, received_ib_multicast.packets);
2453
2454 vf_stats->broadcast =
2455 MLX5_GET_CTR(out, received_eth_broadcast.packets);
2456
2457 err = mlx5_eswitch_query_vport_drop_stats(esw->dev, vport, &stats);
2458 if (err)
2459 goto free_out;
2460 vf_stats->rx_dropped = stats.rx_dropped;
2461 vf_stats->tx_dropped = stats.tx_dropped;
2462
2463 free_out:
2464 kvfree(out);
2465 return err;
2466 }
2467
2468 u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw)
2469 {
2470 return ESW_ALLOWED(esw) ? esw->mode : SRIOV_NONE;
2471 }
2472 EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
2473
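/* LAG may only be formed when both devices run their e-switches in the same
 * mode: either both in SRIOV_NONE or both in SRIOV_OFFLOADS.
 */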
2474 bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1)
2475 {
2476 if ((dev0->priv.eswitch->mode == SRIOV_NONE &&
2477 dev1->priv.eswitch->mode == SRIOV_NONE) ||
2478 (dev0->priv.eswitch->mode == SRIOV_OFFLOADS &&
2479 dev1->priv.eswitch->mode == SRIOV_OFFLOADS))
2480 return true;
2481
2482 return false;
2483 }
2484
2485 bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
2486 struct mlx5_core_dev *dev1)
2487 {
2488 return (dev0->priv.eswitch->mode == SRIOV_OFFLOADS &&
2489 dev1->priv.eswitch->mode == SRIOV_OFFLOADS);
2490 }