drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"

#define UPLINK_VPORT 0xFFFF

enum {
	MLX5_ACTION_NONE = 0,
	MLX5_ACTION_ADD  = 1,
	MLX5_ACTION_DEL  = 2,
};

/* E-Switch UC L2 table hash node */
struct esw_uc_addr {
	struct l2addr_node node;
	u32                table_index;
	u32                vport;
};

/* E-Switch MC FDB table hash node */
struct esw_mc_addr { /* SRIOV only */
	struct l2addr_node     node;
	struct mlx5_flow_rule *uplink_rule; /* Forward to uplink rule */
	u32                    refcnt;
};

/* Vport UC/MC hash node */
struct vport_addr {
	struct l2addr_node     node;
	u8                     action;
	u32                    vport;
	struct mlx5_flow_rule *flow_rule; /* SRIOV only */
	/* A flag indicating that mac was added due to mc promiscuous vport */
	bool mc_promisc;
};

enum {
	UC_ADDR_CHANGE = BIT(0),
	MC_ADDR_CHANGE = BIT(1),
	PROMISC_CHANGE = BIT(3),
};

/* Vport context events */
#define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \
			    MC_ADDR_CHANGE | \
			    PROMISC_CHANGE)

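/* Arm the NIC vport context change event for @vport and select which
 * changes (UC/MC address list, promiscuous mode) should trigger it,
 * according to @events_mask.
 */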
static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
					u32 events_mask)
{
	int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
	int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
	void *nic_vport_ctx;

	MLX5_SET(modify_nic_vport_context_in, in,
		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);

	if (events_mask & UC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_uc_address_change, 1);
	if (events_mask & MC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_mc_address_change, 1);
	if (events_mask & PROMISC_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_promisc_change, 1);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

/* E-Switch vport context HW commands */
static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
					void *in, int inlen)
{
	u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};

	MLX5_SET(modify_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}

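/* Configure port-based VLAN (VST) in the e-switch vport context: strip the
 * outer C-VLAN and/or insert the given vlan/qos when no VLAN tag is present,
 * according to @set_flags.
 */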
static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
				  u16 vlan, u8 qos, u8 set_flags)
{
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0};

	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
		return -ENOTSUPP;

	esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
		  vport, vlan, qos, set_flags);

	if (set_flags & SET_VLAN_STRIP)
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_strip, 1);

	if (set_flags & SET_VLAN_INSERT) {
		/* insert only if no vlan in packet */
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_insert, 1);

		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_pcp, qos);
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_id, vlan);
	}

	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_strip, 1);
	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_insert, 1);

	return modify_esw_vport_context_cmd(dev, vport, in, sizeof(in));
}

/* HW L2 Table (MPFS) management */
static int set_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index,
				  u8 *mac, u8 vlan_valid, u16 vlan)
{
	u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(set_l2_table_entry_out)] = {0};
	u8 *in_mac_addr;

	MLX5_SET(set_l2_table_entry_in, in, opcode,
		 MLX5_CMD_OP_SET_L2_TABLE_ENTRY);
	MLX5_SET(set_l2_table_entry_in, in, table_index, index);
	MLX5_SET(set_l2_table_entry_in, in, vlan_valid, vlan_valid);
	MLX5_SET(set_l2_table_entry_in, in, vlan, vlan);

	in_mac_addr = MLX5_ADDR_OF(set_l2_table_entry_in, in, mac_address);
	ether_addr_copy(&in_mac_addr[2], mac);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static int del_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index)
{
	u32 in[MLX5_ST_SZ_DW(delete_l2_table_entry_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(delete_l2_table_entry_out)] = {0};

	MLX5_SET(delete_l2_table_entry_in, in, opcode,
		 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
	MLX5_SET(delete_l2_table_entry_in, in, table_index, index);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

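/* Bitmap allocator for free slots in the HW L2 (MPFS) table */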
static int alloc_l2_table_index(struct mlx5_l2_table *l2_table, u32 *ix)
{
	int err = 0;

	*ix = find_first_zero_bit(l2_table->bitmap, l2_table->size);
	if (*ix >= l2_table->size)
		err = -ENOSPC;
	else
		__set_bit(*ix, l2_table->bitmap);

	return err;
}

static void free_l2_table_index(struct mlx5_l2_table *l2_table, u32 ix)
{
	__clear_bit(ix, l2_table->bitmap);
}

static int set_l2_table_entry(struct mlx5_core_dev *dev, u8 *mac,
			      u8 vlan_valid, u16 vlan,
			      u32 *index)
{
	struct mlx5_l2_table *l2_table = &dev->priv.eswitch->l2_table;
	int err;

	err = alloc_l2_table_index(l2_table, index);
	if (err)
		return err;

	err = set_l2_table_entry_cmd(dev, *index, mac, vlan_valid, vlan);
	if (err)
		free_l2_table_index(l2_table, *index);

	return err;
}

static void del_l2_table_entry(struct mlx5_core_dev *dev, u32 index)
{
	struct mlx5_l2_table *l2_table = &dev->priv.eswitch->l2_table;

	del_l2_table_entry_cmd(dev, index);
	free_l2_table_index(l2_table, index);
}

/* E-Switch FDB */
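/* Add an FDB flow rule steering matching traffic to @vport.
 * mac_c/mac_v are the DMAC match criteria/value; an rx_rule additionally
 * matches on source_port == UPLINK_VPORT (used for the promiscuous rule).
 */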
static struct mlx5_flow_rule *
__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
			 u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
{
	int match_header = (is_zero_ether_addr(mac_c) ? 0 :
			    MLX5_MATCH_OUTER_HEADERS);
	struct mlx5_flow_rule *flow_rule = NULL;
	struct mlx5_flow_destination dest;
	struct mlx5_flow_spec *spec;
	void *mv_misc = NULL;
	void *mc_misc = NULL;
	u8 *dmac_v = NULL;
	u8 *dmac_c = NULL;

	if (rx_rule)
		match_header |= MLX5_MATCH_MISC_PARAMETERS;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
		return NULL;
	}
	dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			      outer_headers.dmac_47_16);
	dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			      outer_headers.dmac_47_16);

	if (match_header & MLX5_MATCH_OUTER_HEADERS) {
		ether_addr_copy(dmac_v, mac_v);
		ether_addr_copy(dmac_c, mac_c);
	}

	if (match_header & MLX5_MATCH_MISC_PARAMETERS) {
		mv_misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       misc_parameters);
		mc_misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       misc_parameters);
		MLX5_SET(fte_match_set_misc, mv_misc, source_port, UPLINK_VPORT);
		MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport_num = vport;

	esw_debug(esw->dev,
		  "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
		  dmac_v, dmac_c, vport);
	spec->match_criteria_enable = match_header;
	flow_rule =
		mlx5_add_flow_rule(esw->fdb_table.fdb, spec,
				   MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
				   0, &dest);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev,
			 "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
			 dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
		flow_rule = NULL;
	}

	kvfree(spec);
	return flow_rule;
}

static struct mlx5_flow_rule *
esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
{
	u8 mac_c[ETH_ALEN];

	eth_broadcast_addr(mac_c);
	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac);
}

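/* Match any multicast DMAC by masking only the multicast bit
 * (mask 01:00:00:00:00:00, value 01:00:00:00:00:00).
 */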
static struct mlx5_flow_rule *
esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport)
{
	u8 mac_c[ETH_ALEN];
	u8 mac_v[ETH_ALEN];

	eth_zero_addr(mac_c);
	eth_zero_addr(mac_v);
	mac_c[0] = 0x01;
	mac_v[0] = 0x01;
	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v);
}

static struct mlx5_flow_rule *
esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport)
{
	u8 mac_c[ETH_ALEN];
	u8 mac_v[ETH_ALEN];

	eth_zero_addr(mac_c);
	eth_zero_addr(mac_v);
	return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
}

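/* Legacy mode FDB layout: one group for full DMAC matches, plus the last
 * two table entries reserved for the allmulti rule and the promisc rule.
 */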
static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb;
	struct mlx5_flow_group *g;
	void *match_criteria;
	int table_size;
	u32 *flow_group_in;
	u8 *dmac;
	int err = 0;

	esw_debug(dev, "Create FDB log_max_size(%d)\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		return -ENOMEM;
	}

	flow_group_in = mlx5_vzalloc(inlen);
	if (!flow_group_in)
		return -ENOMEM;
	memset(flow_group_in, 0, inlen);

	table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
	fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create FDB Table err %d\n", err);
		goto out;
	}
	esw->fdb_table.fdb = fdb;

	/* Addresses group : Full match unicast/multicast addresses */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	/* Preserve 2 entries for allmulti and promisc rules */
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
	eth_broadcast_addr(dmac);
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.addr_grp = g;

	/* Allmulti group : One rule that forwards any mcast traffic */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2);
	eth_zero_addr(dmac);
	dmac[0] = 0x01;
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.allmulti_grp = g;

	/* Promiscuous group :
	 * One rule that forwards all unmatched traffic from previous groups
	 */
	eth_zero_addr(dmac);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.promisc_grp = g;

out:
	if (err) {
		if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.allmulti_grp)) {
			mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
			esw->fdb_table.legacy.allmulti_grp = NULL;
		}
		if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.addr_grp)) {
			mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
			esw->fdb_table.legacy.addr_grp = NULL;
		}
		if (!IS_ERR_OR_NULL(esw->fdb_table.fdb)) {
			mlx5_destroy_flow_table(esw->fdb_table.fdb);
			esw->fdb_table.fdb = NULL;
		}
	}

	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.fdb)
		return;

	esw_debug(esw->dev, "Destroy FDB Table\n");
	mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
	mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
	mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
	esw->fdb_table.fdb = NULL;
	esw->fdb_table.legacy.addr_grp = NULL;
	esw->fdb_table.legacy.allmulti_grp = NULL;
	esw->fdb_table.legacy.promisc_grp = NULL;
}

/* E-Switch vport UC/MC lists management */
typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
				 struct vport_addr *vaddr);

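/* Program a vport unicast MAC into the HW L2 (MPFS) table; under SRIOV
 * legacy mode also add an FDB rule forwarding that DMAC to the vport.
 * A unicast MAC can be owned by only one vport at a time.
 */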
static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->l2_table.l2_hash;
	struct esw_uc_addr *esw_uc;
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;
	int err;

	esw_uc = l2addr_hash_find(hash, mac, struct esw_uc_addr);
	if (esw_uc) {
		esw_warn(esw->dev,
			 "Failed to set L2 mac(%pM) for vport(%d), mac is already in use by vport(%d)\n",
			 mac, vport, esw_uc->vport);
		return -EEXIST;
	}

	esw_uc = l2addr_hash_add(hash, mac, struct esw_uc_addr, GFP_KERNEL);
	if (!esw_uc)
		return -ENOMEM;
	esw_uc->vport = vport;

	err = set_l2_table_entry(esw->dev, mac, 0, 0, &esw_uc->table_index);
	if (err)
		goto abort;

	/* SRIOV is enabled: Forward UC MAC to vport */
	if (esw->fdb_table.fdb && esw->mode == SRIOV_LEGACY)
		vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);

	esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM index:%d fr(%p)\n",
		  vport, mac, esw_uc->table_index, vaddr->flow_rule);
	return err;
abort:
	l2addr_hash_del(esw_uc);
	return err;
}

static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->l2_table.l2_hash;
	struct esw_uc_addr *esw_uc;
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;

	esw_uc = l2addr_hash_find(hash, mac, struct esw_uc_addr);
	if (!esw_uc || esw_uc->vport != vport) {
		esw_debug(esw->dev,
			  "MAC(%pM) doesn't belong to vport (%d)\n",
			  mac, vport);
		return -EINVAL;
	}
	esw_debug(esw->dev, "\tDELETE UC MAC: vport[%d] %pM index:%d fr(%p)\n",
		  vport, mac, esw_uc->table_index, vaddr->flow_rule);

	del_l2_table_entry(esw->dev, esw_uc->table_index);

	if (vaddr->flow_rule)
		mlx5_del_flow_rule(vaddr->flow_rule);
	vaddr->flow_rule = NULL;

	l2addr_hash_del(esw_uc);
	return 0;
}

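/* Propagate a multicast MAC add/del to every other vport that currently
 * has an allmulti rule, so mc promiscuous vports keep receiving it.
 */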
static void update_allmulti_vports(struct mlx5_eswitch *esw,
				   struct vport_addr *vaddr,
				   struct esw_mc_addr *esw_mc)
{
	u8 *mac = vaddr->node.addr;
	u32 vport_idx = 0;

	for (vport_idx = 0; vport_idx < esw->total_vports; vport_idx++) {
		struct mlx5_vport *vport = &esw->vports[vport_idx];
		struct hlist_head *vport_hash = vport->mc_list;
		struct vport_addr *iter_vaddr =
					l2addr_hash_find(vport_hash,
							 mac,
							 struct vport_addr);
		if (IS_ERR_OR_NULL(vport->allmulti_rule) ||
		    vaddr->vport == vport_idx)
			continue;
		switch (vaddr->action) {
		case MLX5_ACTION_ADD:
			if (iter_vaddr)
				continue;
			iter_vaddr = l2addr_hash_add(vport_hash, mac,
						     struct vport_addr,
						     GFP_KERNEL);
			if (!iter_vaddr) {
				esw_warn(esw->dev,
					 "ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n",
					 mac, vport_idx);
				continue;
			}
			iter_vaddr->vport = vport_idx;
			iter_vaddr->flow_rule =
					esw_fdb_set_vport_rule(esw,
							       mac,
							       vport_idx);
			iter_vaddr->mc_promisc = true;
			break;
		case MLX5_ACTION_DEL:
			if (!iter_vaddr)
				continue;
			mlx5_del_flow_rule(iter_vaddr->flow_rule);
			l2addr_hash_del(iter_vaddr);
			break;
		}
	}
}

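/* Multicast MACs are refcounted in the global esw mc_table; the first
 * reference also installs a rule forwarding the MAC to the uplink.
 */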
static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;

	if (!esw->fdb_table.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (esw_mc)
		goto add;

	esw_mc = l2addr_hash_add(hash, mac, struct esw_mc_addr, GFP_KERNEL);
	if (!esw_mc)
		return -ENOMEM;

	esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
		esw_fdb_set_vport_rule(esw, mac, UPLINK_VPORT);

	/* Add this multicast mac to all the mc promiscuous vports */
	update_allmulti_vports(esw, vaddr, esw_mc);

add:
	/* If the multicast mac is added as a result of mc promiscuous vport,
	 * don't increment the multicast ref count
	 */
	if (!vaddr->mc_promisc)
		esw_mc->refcnt++;

	/* Forward MC MAC to vport */
	vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
	esw_debug(esw->dev,
		  "\tADDED MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule,
		  esw_mc->refcnt, esw_mc->uplink_rule);
	return 0;
}

static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;

	if (!esw->fdb_table.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (!esw_mc) {
		esw_warn(esw->dev,
			 "Failed to find eswitch MC addr for MAC(%pM) vport(%d)",
			 mac, vport);
		return -EINVAL;
	}
	esw_debug(esw->dev,
		  "\tDELETE MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule, esw_mc->refcnt,
		  esw_mc->uplink_rule);

	if (vaddr->flow_rule)
		mlx5_del_flow_rule(vaddr->flow_rule);
	vaddr->flow_rule = NULL;

	/* If the multicast mac is added as a result of mc promiscuous vport,
	 * don't decrement the multicast ref count.
	 */
	if (vaddr->mc_promisc || (--esw_mc->refcnt > 0))
		return 0;

	/* Remove this multicast mac from all the mc promiscuous vports */
	update_allmulti_vports(esw, vaddr, esw_mc);

	if (esw_mc->uplink_rule)
		mlx5_del_flow_rule(esw_mc->uplink_rule);

	l2addr_hash_del(esw_mc);
	return 0;
}

/* Apply vport UC/MC list to HW l2 table and FDB table */
static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
				      u32 vport_num, int list_type)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
	vport_addr_action vport_addr_add;
	vport_addr_action vport_addr_del;
	struct vport_addr *addr;
	struct l2addr_node *node;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int hi;

	vport_addr_add = is_uc ? esw_add_uc_addr :
				 esw_add_mc_addr;
	vport_addr_del = is_uc ? esw_del_uc_addr :
				 esw_del_mc_addr;

	hash = is_uc ? vport->uc_list : vport->mc_list;
	for_each_l2hash_node(node, tmp, hash, hi) {
		addr = container_of(node, struct vport_addr, node);
		switch (addr->action) {
		case MLX5_ACTION_ADD:
			vport_addr_add(esw, addr);
			addr->action = MLX5_ACTION_NONE;
			break;
		case MLX5_ACTION_DEL:
			vport_addr_del(esw, addr);
			l2addr_hash_del(addr);
			break;
		}
	}
}

/* Sync vport UC/MC list from vport context */
static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
				       u32 vport_num, int list_type)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
	u8 (*mac_list)[ETH_ALEN];
	struct l2addr_node *node;
	struct vport_addr *addr;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int size;
	int err;
	int hi;
	int i;

	size = is_uc ? MLX5_MAX_UC_PER_VPORT(esw->dev) :
		       MLX5_MAX_MC_PER_VPORT(esw->dev);

	mac_list = kcalloc(size, ETH_ALEN, GFP_KERNEL);
	if (!mac_list)
		return;

	hash = is_uc ? vport->uc_list : vport->mc_list;

	for_each_l2hash_node(node, tmp, hash, hi) {
		addr = container_of(node, struct vport_addr, node);
		addr->action = MLX5_ACTION_DEL;
	}

	if (!vport->enabled)
		goto out;

	err = mlx5_query_nic_vport_mac_list(esw->dev, vport_num, list_type,
					    mac_list, &size);
	if (err)
		goto out;
	esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
		  vport_num, is_uc ? "UC" : "MC", size);

	for (i = 0; i < size; i++) {
		if (is_uc && !is_valid_ether_addr(mac_list[i]))
			continue;

		if (!is_uc && !is_multicast_ether_addr(mac_list[i]))
			continue;

		addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr);
		if (addr) {
			addr->action = MLX5_ACTION_NONE;
			/* If this mac was previously added because of allmulti
			 * promiscuous rx mode, it's now converted to be an
			 * original vport mac.
			 */
			if (addr->mc_promisc) {
				struct esw_mc_addr *esw_mc =
					l2addr_hash_find(esw->mc_table,
							 mac_list[i],
							 struct esw_mc_addr);
				if (!esw_mc) {
					esw_warn(esw->dev,
						 "Failed to find MAC(%pM) in mcast DB\n",
						 mac_list[i]);
					continue;
				}
				esw_mc->refcnt++;
				addr->mc_promisc = false;
			}
			continue;
		}

		addr = l2addr_hash_add(hash, mac_list[i], struct vport_addr,
				       GFP_KERNEL);
		if (!addr) {
			esw_warn(esw->dev,
				 "Failed to add MAC(%pM) to vport[%d] DB\n",
				 mac_list[i], vport_num);
			continue;
		}
		addr->vport = vport_num;
		addr->action = MLX5_ACTION_ADD;
	}
out:
	kfree(mac_list);
}

/* Sync vport MC promiscuous (allmulti) list from the e-switch MC table.
 * Must be called after esw_update_vport_addr_list.
 */
static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u32 vport_num)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	struct l2addr_node *node;
	struct vport_addr *addr;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int hi;

	hash = vport->mc_list;

	for_each_l2hash_node(node, tmp, esw->mc_table, hi) {
		u8 *mac = node->addr;

		addr = l2addr_hash_find(hash, mac, struct vport_addr);
		if (addr) {
			if (addr->action == MLX5_ACTION_DEL)
				addr->action = MLX5_ACTION_NONE;
			continue;
		}
		addr = l2addr_hash_add(hash, mac, struct vport_addr,
				       GFP_KERNEL);
		if (!addr) {
			esw_warn(esw->dev,
				 "Failed to add allmulti MAC(%pM) to vport[%d] DB\n",
				 mac, vport_num);
			continue;
		}
		addr->vport = vport_num;
		addr->action = MLX5_ACTION_ADD;
		addr->mc_promisc = true;
	}
}

/* Apply vport rx mode to HW FDB table */
static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num,
				    bool promisc, bool mc_promisc)
{
	struct esw_mc_addr *allmulti_addr = esw->mc_promisc;
	struct mlx5_vport *vport = &esw->vports[vport_num];

	if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc)
		goto promisc;

	if (mc_promisc) {
		vport->allmulti_rule =
			esw_fdb_set_vport_allmulti_rule(esw, vport_num);
		if (!allmulti_addr->uplink_rule)
			allmulti_addr->uplink_rule =
				esw_fdb_set_vport_allmulti_rule(esw,
								UPLINK_VPORT);
		allmulti_addr->refcnt++;
	} else if (vport->allmulti_rule) {
		mlx5_del_flow_rule(vport->allmulti_rule);
		vport->allmulti_rule = NULL;

		if (--allmulti_addr->refcnt > 0)
			goto promisc;

		if (allmulti_addr->uplink_rule)
			mlx5_del_flow_rule(allmulti_addr->uplink_rule);
		allmulti_addr->uplink_rule = NULL;
	}

promisc:
	if (IS_ERR_OR_NULL(vport->promisc_rule) != promisc)
		return;

	if (promisc) {
		vport->promisc_rule = esw_fdb_set_vport_promisc_rule(esw,
								     vport_num);
	} else if (vport->promisc_rule) {
		mlx5_del_flow_rule(vport->promisc_rule);
		vport->promisc_rule = NULL;
	}
}

/* Sync vport rx mode from vport context */
static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	int promisc_all = 0;
	int promisc_uc = 0;
	int promisc_mc = 0;
	int err;

	err = mlx5_query_nic_vport_promisc(esw->dev,
					   vport_num,
					   &promisc_uc,
					   &promisc_mc,
					   &promisc_all);
	if (err)
		return;
	esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
		  vport_num, promisc_all, promisc_mc);

	if (!vport->info.trusted || !vport->enabled) {
		promisc_uc = 0;
		promisc_mc = 0;
		promisc_all = 0;
	}

	esw_apply_vport_rx_mode(esw, vport_num, promisc_all,
				(promisc_all || promisc_mc));
}

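/* Handle a vport context change event: re-read the vport UC/MC lists and
 * rx mode from FW, apply the deltas to the L2/FDB tables and re-arm the
 * event. Must be called with esw->state_lock held.
 */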
static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
{
	struct mlx5_core_dev *dev = vport->dev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	u8 mac[ETH_ALEN];

	mlx5_query_nic_vport_mac_address(dev, vport->vport, mac);
	esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
		  vport->vport, mac);

	if (vport->enabled_events & UC_ADDR_CHANGE) {
		esw_update_vport_addr_list(esw, vport->vport,
					   MLX5_NVPRT_LIST_TYPE_UC);
		esw_apply_vport_addr_list(esw, vport->vport,
					  MLX5_NVPRT_LIST_TYPE_UC);
	}

	if (vport->enabled_events & MC_ADDR_CHANGE) {
		esw_update_vport_addr_list(esw, vport->vport,
					   MLX5_NVPRT_LIST_TYPE_MC);
	}

	if (vport->enabled_events & PROMISC_CHANGE) {
		esw_update_vport_rx_mode(esw, vport->vport);
		if (!IS_ERR_OR_NULL(vport->allmulti_rule))
			esw_update_vport_mc_promisc(esw, vport->vport);
	}

	if (vport->enabled_events & (PROMISC_CHANGE | MC_ADDR_CHANGE)) {
		esw_apply_vport_addr_list(esw, vport->vport,
					  MLX5_NVPRT_LIST_TYPE_MC);
	}

	esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
	if (vport->enabled)
		arm_vport_context_events_cmd(dev, vport->vport,
					     vport->enabled_events);
}

static void esw_vport_change_handler(struct work_struct *work)
{
	struct mlx5_vport *vport =
		container_of(work, struct mlx5_vport, vport_change_handler);
	struct mlx5_eswitch *esw = vport->dev->priv.eswitch;

	mutex_lock(&esw->state_lock);
	esw_vport_change_handle_locked(vport);
	mutex_unlock(&esw->state_lock);
}

static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
					struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *vlan_grp = NULL;
	struct mlx5_flow_group *drop_grp = NULL;
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *acl;
	void *match_criteria;
	u32 *flow_group_in;
	/* The egress acl table contains 2 rules:
	 * 1) Allow traffic with vlan_tag=vst_vlan_id
	 * 2) Drop all other traffic.
	 */
	int table_size = 2;
	int err = 0;

	if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support) ||
	    !IS_ERR_OR_NULL(vport->egress.acl))
		return;

	esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
		  vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
	if (!root_ns) {
		esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
		return;
	}

	flow_group_in = mlx5_vzalloc(inlen);
	if (!flow_group_in)
		return;

	acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
	if (IS_ERR(acl)) {
		err = PTR_ERR(acl);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);

	vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR(vlan_grp)) {
		err = PTR_ERR(vlan_grp);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
	drop_grp = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR(drop_grp)) {
		err = PTR_ERR(drop_grp);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	vport->egress.acl = acl;
	vport->egress.drop_grp = drop_grp;
	vport->egress.allowed_vlans_grp = vlan_grp;
out:
	kvfree(flow_group_in);
	if (err && !IS_ERR_OR_NULL(vlan_grp))
		mlx5_destroy_flow_group(vlan_grp);
	if (err && !IS_ERR_OR_NULL(acl))
		mlx5_destroy_flow_table(acl);
}

static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
					   struct mlx5_vport *vport)
{
	if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan))
		mlx5_del_flow_rule(vport->egress.allowed_vlan);

	if (!IS_ERR_OR_NULL(vport->egress.drop_rule))
		mlx5_del_flow_rule(vport->egress.drop_rule);

	vport->egress.allowed_vlan = NULL;
	vport->egress.drop_rule = NULL;
}

static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
					 struct mlx5_vport *vport)
{
	if (IS_ERR_OR_NULL(vport->egress.acl))
		return;

	esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport);

	esw_vport_cleanup_egress_rules(esw, vport);
	mlx5_destroy_flow_group(vport->egress.allowed_vlans_grp);
	mlx5_destroy_flow_group(vport->egress.drop_grp);
	mlx5_destroy_flow_table(vport->egress.acl);
	vport->egress.allowed_vlans_grp = NULL;
	vport->egress.drop_grp = NULL;
	vport->egress.acl = NULL;
}

static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
					 struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *acl;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u32 *flow_group_in;
	/* The ingress acl table contains 4 groups
	 * (2 active rules at the same time -
	 *  1 allow rule from one of the first 3 groups.
	 *  1 drop rule from the last group):
	 * 1) Allow untagged traffic with smac=original mac.
	 * 2) Allow untagged traffic.
	 * 3) Allow traffic with smac=original mac.
	 * 4) Drop all other traffic.
	 */
	int table_size = 4;
	int err = 0;

	if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support) ||
	    !IS_ERR_OR_NULL(vport->ingress.acl))
		return;

	esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
		  vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
	if (!root_ns) {
		esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
		return;
	}

	flow_group_in = mlx5_vzalloc(inlen);
	if (!flow_group_in)
		return;

	acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
	if (IS_ERR(acl)) {
		err = PTR_ERR(acl);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
			 vport->vport, err);
		goto out;
	}
	vport->ingress.acl = acl;

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);

	g = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}
	vport->ingress.allow_untagged_spoofchk_grp = g;

	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);

	g = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}
	vport->ingress.allow_untagged_only_grp = g;

	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);

	g = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}
	vport->ingress.allow_spoofchk_only_grp = g;

	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);

	g = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}
	vport->ingress.drop_grp = g;

out:
	if (err) {
		if (!IS_ERR_OR_NULL(vport->ingress.allow_spoofchk_only_grp))
			mlx5_destroy_flow_group(
					vport->ingress.allow_spoofchk_only_grp);
		if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_only_grp))
			mlx5_destroy_flow_group(
					vport->ingress.allow_untagged_only_grp);
		if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_spoofchk_grp))
			mlx5_destroy_flow_group(
					vport->ingress.allow_untagged_spoofchk_grp);
		if (!IS_ERR_OR_NULL(vport->ingress.acl))
			mlx5_destroy_flow_table(vport->ingress.acl);
	}

	kvfree(flow_group_in);
}

static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
					    struct mlx5_vport *vport)
{
	if (!IS_ERR_OR_NULL(vport->ingress.drop_rule))
		mlx5_del_flow_rule(vport->ingress.drop_rule);

	if (!IS_ERR_OR_NULL(vport->ingress.allow_rule))
		mlx5_del_flow_rule(vport->ingress.allow_rule);

	vport->ingress.drop_rule = NULL;
	vport->ingress.allow_rule = NULL;
}

static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
					  struct mlx5_vport *vport)
{
	if (IS_ERR_OR_NULL(vport->ingress.acl))
		return;

	esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);

	esw_vport_cleanup_ingress_rules(esw, vport);
	mlx5_destroy_flow_group(vport->ingress.allow_spoofchk_only_grp);
	mlx5_destroy_flow_group(vport->ingress.allow_untagged_only_grp);
	mlx5_destroy_flow_group(vport->ingress.allow_untagged_spoofchk_grp);
	mlx5_destroy_flow_group(vport->ingress.drop_grp);
	mlx5_destroy_flow_table(vport->ingress.acl);
	vport->ingress.acl = NULL;
	vport->ingress.drop_grp = NULL;
	vport->ingress.allow_spoofchk_only_grp = NULL;
	vport->ingress.allow_untagged_only_grp = NULL;
	vport->ingress.allow_untagged_spoofchk_grp = NULL;
}

static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport)
{
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 *smac_v;

	if (vport->info.spoofchk && !is_valid_ether_addr(vport->info.mac)) {
		mlx5_core_warn(esw->dev,
			       "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
			       vport->vport);
		return -EPERM;
	}

	esw_vport_cleanup_ingress_rules(esw, vport);

	if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
		esw_vport_disable_ingress_acl(esw, vport);
		return 0;
	}

	esw_vport_enable_ingress_acl(esw, vport);

	esw_debug(esw->dev,
		  "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
		  vport->vport, vport->info.vlan, vport->info.qos);

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		err = -ENOMEM;
		esw_warn(esw->dev, "vport[%d] configure ingress rules failed, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	if (vport->info.vlan || vport->info.qos)
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);

	if (vport->info.spoofchk) {
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0);
		smac_v = MLX5_ADDR_OF(fte_match_param,
				      spec->match_value,
				      outer_headers.smac_47_16);
		ether_addr_copy(smac_v, vport->info.mac);
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	vport->ingress.allow_rule =
		mlx5_add_flow_rule(vport->ingress.acl, spec,
				   MLX5_FLOW_CONTEXT_ACTION_ALLOW,
				   0, NULL);
	if (IS_ERR(vport->ingress.allow_rule)) {
		err = PTR_ERR(vport->ingress.allow_rule);
		esw_warn(esw->dev,
			 "vport[%d] configure ingress allow rule, err(%d)\n",
			 vport->vport, err);
		vport->ingress.allow_rule = NULL;
		goto out;
	}

	memset(spec, 0, sizeof(*spec));
	vport->ingress.drop_rule =
		mlx5_add_flow_rule(vport->ingress.acl, spec,
				   MLX5_FLOW_CONTEXT_ACTION_DROP,
				   0, NULL);
	if (IS_ERR(vport->ingress.drop_rule)) {
		err = PTR_ERR(vport->ingress.drop_rule);
		esw_warn(esw->dev,
			 "vport[%d] configure ingress drop rule, err(%d)\n",
			 vport->vport, err);
		vport->ingress.drop_rule = NULL;
		goto out;
	}

out:
	if (err)
		esw_vport_cleanup_ingress_rules(esw, vport);
	kvfree(spec);
	return err;
}

static int esw_vport_egress_config(struct mlx5_eswitch *esw,
				   struct mlx5_vport *vport)
{
	struct mlx5_flow_spec *spec;
	int err = 0;

	esw_vport_cleanup_egress_rules(esw, vport);

	if (!vport->info.vlan && !vport->info.qos) {
		esw_vport_disable_egress_acl(esw, vport);
		return 0;
	}

	esw_vport_enable_egress_acl(esw, vport);

	esw_debug(esw->dev,
		  "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
		  vport->vport, vport->info.vlan, vport->info.qos);

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		err = -ENOMEM;
		esw_warn(esw->dev, "vport[%d] configure egress rules failed, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	/* Allowed vlan rule */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.vlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->info.vlan);

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	vport->egress.allowed_vlan =
		mlx5_add_flow_rule(vport->egress.acl, spec,
				   MLX5_FLOW_CONTEXT_ACTION_ALLOW,
				   0, NULL);
	if (IS_ERR(vport->egress.allowed_vlan)) {
		err = PTR_ERR(vport->egress.allowed_vlan);
		esw_warn(esw->dev,
			 "vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
			 vport->vport, err);
		vport->egress.allowed_vlan = NULL;
		goto out;
	}

	/* Drop others rule (star rule) */
	memset(spec, 0, sizeof(*spec));
	vport->egress.drop_rule =
		mlx5_add_flow_rule(vport->egress.acl, spec,
				   MLX5_FLOW_CONTEXT_ACTION_DROP,
				   0, NULL);
	if (IS_ERR(vport->egress.drop_rule)) {
		err = PTR_ERR(vport->egress.drop_rule);
		esw_warn(esw->dev,
			 "vport[%d] configure egress drop rule failed, err(%d)\n",
			 vport->vport, err);
		vport->egress.drop_rule = NULL;
	}
out:
	kvfree(spec);
	return err;
}

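/* Derive an EUI-64 style node GUID from the MAC (OUI bytes, then ff:fe,
 * then the NIC-specific bytes), used as the vport's RDMA node GUID.
 */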
static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
{
	((u8 *)node_guid)[7] = mac[0];
	((u8 *)node_guid)[6] = mac[1];
	((u8 *)node_guid)[5] = mac[2];
	((u8 *)node_guid)[4] = 0xff;
	((u8 *)node_guid)[3] = 0xfe;
	((u8 *)node_guid)[2] = mac[3];
	((u8 *)node_guid)[1] = mac[4];
	((u8 *)node_guid)[0] = mac[5];
}

static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
				 struct mlx5_vport *vport)
{
	int vport_num = vport->vport;

	if (!vport_num)
		return;

	mlx5_modify_vport_admin_state(esw->dev,
				      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
				      vport_num,
				      vport->info.link_state);
	mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, vport->info.mac);
	mlx5_modify_nic_vport_node_guid(esw->dev, vport_num, vport->info.node_guid);
	modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, vport->info.qos,
			       (vport->info.vlan || vport->info.qos));

	/* Only legacy mode needs ACLs */
	if (esw->mode == SRIOV_LEGACY) {
		esw_vport_ingress_config(esw, vport);
		esw_vport_egress_config(esw, vport);
	}
}
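
/* Bring a vport under e-switch management: restore its saved configuration,
 * sync its current state and arm the requested vport context events.
 */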
static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
			     int enable_events)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];

	mutex_lock(&esw->state_lock);
	WARN_ON(vport->enabled);

	esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);

	/* Restore old vport configuration */
	esw_apply_vport_conf(esw, vport);

	/* Sync with current vport context */
	vport->enabled_events = enable_events;
	vport->enabled = true;

	/* only PF is trusted by default */
	if (!vport_num)
		vport->info.trusted = true;

	esw_vport_change_handle_locked(vport);

	esw->enabled_vports++;
	esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
	mutex_unlock(&esw->state_lock);
}

static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];

	if (!vport->enabled)
		return;

	esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
	/* Mark this vport as disabled to discard new events */
	vport->enabled = false;

	synchronize_irq(mlx5_get_msix_vec(esw->dev, MLX5_EQ_VEC_ASYNC));
	/* Wait for current already scheduled events to complete */
	flush_workqueue(esw->work_queue);
	/* Disable events from this vport */
	arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
	mutex_lock(&esw->state_lock);
	/* We don't assume VFs will cleanup after themselves.
	 * Calling vport change handler while vport is disabled will cleanup
	 * the vport resources.
	 */
	esw_vport_change_handle_locked(vport);
	vport->enabled_events = 0;

	if (vport_num && esw->mode == SRIOV_LEGACY) {
		mlx5_modify_vport_admin_state(esw->dev,
					      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
					      vport_num,
					      MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
		esw_vport_disable_egress_acl(esw, vport);
		esw_vport_disable_ingress_acl(esw, vport);
	}
	esw->enabled_vports--;
	mutex_unlock(&esw->state_lock);
}

/* Public E-Switch API */
int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
{
	int err;
	int i, enabled_events;

	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
	    MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return 0;

	if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
	    !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
		esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
		return -ENOTSUPP;
	}

	if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
		esw_warn(esw->dev, "E-Switch ingress ACL is not supported by FW\n");

	if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
		esw_warn(esw->dev, "E-Switch egress ACL is not supported by FW\n");

	esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d) mode (%d)\n", nvfs, mode);
	esw->mode = mode;
	esw_disable_vport(esw, 0);

	if (mode == SRIOV_LEGACY)
		err = esw_create_legacy_fdb_table(esw, nvfs + 1);
	else
		err = esw_offloads_init(esw, nvfs + 1);
	if (err)
		goto abort;

	enabled_events = (mode == SRIOV_LEGACY) ? SRIOV_VPORT_EVENTS : UC_ADDR_CHANGE;
	for (i = 0; i <= nvfs; i++)
		esw_enable_vport(esw, i, enabled_events);

	esw_info(esw->dev, "SRIOV enabled: active vports(%d)\n",
		 esw->enabled_vports);
	return 0;

abort:
	esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
	esw->mode = SRIOV_NONE;
	return err;
}

void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
{
	struct esw_mc_addr *mc_promisc;
	int nvports;
	int i;

	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
	    MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return;

	esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n",
		 esw->enabled_vports, esw->mode);

	mc_promisc = esw->mc_promisc;
	nvports = esw->enabled_vports;

	for (i = 0; i < esw->total_vports; i++)
		esw_disable_vport(esw, i);

	if (mc_promisc && mc_promisc->uplink_rule)
		mlx5_del_flow_rule(mc_promisc->uplink_rule);

	if (esw->mode == SRIOV_LEGACY)
		esw_destroy_legacy_fdb_table(esw);
	else if (esw->mode == SRIOV_OFFLOADS)
		esw_offloads_cleanup(esw, nvports);

	esw->mode = SRIOV_NONE;
	/* VPORT 0 (PF) must be enabled back with non-sriov configuration */
	esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
}

void mlx5_eswitch_attach(struct mlx5_eswitch *esw)
{
	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
	    MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return;

	esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
	/* VF Vports will be enabled when SRIOV is enabled */
}

void mlx5_eswitch_detach(struct mlx5_eswitch *esw)
{
	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
	    MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return;

	esw_disable_vport(esw, 0);
}

int mlx5_eswitch_init(struct mlx5_core_dev *dev)
{
	int l2_table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table);
	int total_vports = MLX5_TOTAL_VPORTS(dev);
	struct esw_mc_addr *mc_promisc;
	struct mlx5_eswitch *esw;
	int vport_num;
	int err;

	if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
	    MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return 0;

	esw_info(dev,
		 "Total vports %d, l2 table size(%d), per vport: max uc(%d) max mc(%d)\n",
		 total_vports, l2_table_size,
		 MLX5_MAX_UC_PER_VPORT(dev),
		 MLX5_MAX_MC_PER_VPORT(dev));

	esw = kzalloc(sizeof(*esw), GFP_KERNEL);
	if (!esw)
		return -ENOMEM;

	esw->dev = dev;

	esw->l2_table.bitmap = kcalloc(BITS_TO_LONGS(l2_table_size),
				       sizeof(uintptr_t), GFP_KERNEL);
	if (!esw->l2_table.bitmap) {
		err = -ENOMEM;
		goto abort;
	}
	esw->l2_table.size = l2_table_size;

	mc_promisc = kzalloc(sizeof(*mc_promisc), GFP_KERNEL);
	if (!mc_promisc) {
		err = -ENOMEM;
		goto abort;
	}
	esw->mc_promisc = mc_promisc;

	esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
	if (!esw->work_queue) {
		err = -ENOMEM;
		goto abort;
	}

	esw->vports = kcalloc(total_vports, sizeof(struct mlx5_vport),
			      GFP_KERNEL);
	if (!esw->vports) {
		err = -ENOMEM;
		goto abort;
	}

	esw->offloads.vport_reps =
		kzalloc(total_vports * sizeof(struct mlx5_eswitch_rep),
			GFP_KERNEL);
	if (!esw->offloads.vport_reps) {
		err = -ENOMEM;
		goto abort;
	}

	mutex_init(&esw->state_lock);

	for (vport_num = 0; vport_num < total_vports; vport_num++) {
		struct mlx5_vport *vport = &esw->vports[vport_num];

		vport->vport = vport_num;
		vport->info.link_state = MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
		vport->dev = dev;
		INIT_WORK(&vport->vport_change_handler,
			  esw_vport_change_handler);
	}

	esw->total_vports = total_vports;
	esw->enabled_vports = 0;
	esw->mode = SRIOV_NONE;

	dev->priv.eswitch = esw;
	return 0;
abort:
	if (esw->work_queue)
		destroy_workqueue(esw->work_queue);
	kfree(esw->l2_table.bitmap);
	kfree(esw->vports);
	kfree(esw->offloads.vport_reps);
	kfree(esw);
	return err;
}

void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
{
	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
	    MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return;

	esw_info(esw->dev, "cleanup\n");

	esw->dev->priv.eswitch = NULL;
	destroy_workqueue(esw->work_queue);
	kfree(esw->l2_table.bitmap);
	kfree(esw->mc_promisc);
	kfree(esw->offloads.vport_reps);
	kfree(esw->vports);
	kfree(esw);
}

void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe)
{
	struct mlx5_eqe_vport_change *vc_eqe = &eqe->data.vport_change;
	u16 vport_num = be16_to_cpu(vc_eqe->vport_num);
	struct mlx5_vport *vport;

	if (!esw) {
		pr_warn("MLX5 E-Switch: vport %d got an event while eswitch is not initialized\n",
			vport_num);
		return;
	}

	vport = &esw->vports[vport_num];
	if (vport->enabled)
		queue_work(esw->work_queue, &vport->vport_change_handler);
}

/* Vport Administration */
#define ESW_ALLOWED(esw) \
	(esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev))
#define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)

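/* The mlx5_eswitch_set_vport_* helpers below back the PF ndo_set_vf_*
 * callbacks; they update FW and cache the setting in vport->info so it
 * can be re-applied when the vport is re-enabled.
 */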
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
			       int vport, u8 mac[ETH_ALEN])
{
	struct mlx5_vport *evport;
	u64 node_guid;
	int err = 0;

	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (!LEGAL_VPORT(esw, vport))
		return -EINVAL;

	mutex_lock(&esw->state_lock);
	evport = &esw->vports[vport];

	if (evport->info.spoofchk && !is_valid_ether_addr(mac)) {
		mlx5_core_warn(esw->dev,
			       "MAC invalidation is not allowed when spoofchk is on, vport(%d)\n",
			       vport);
		err = -EPERM;
		goto unlock;
	}

	err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
	if (err) {
		mlx5_core_warn(esw->dev,
			       "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n",
			       vport, err);
		goto unlock;
	}

	node_guid_gen_from_mac(&node_guid, mac);
	err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid);
	if (err)
		mlx5_core_warn(esw->dev,
			       "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
			       vport, err);

	ether_addr_copy(evport->info.mac, mac);
	evport->info.node_guid = node_guid;
	if (evport->enabled && esw->mode == SRIOV_LEGACY)
		err = esw_vport_ingress_config(esw, evport);

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
				 int vport, int link_state)
{
	struct mlx5_vport *evport;
	int err = 0;

	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (!LEGAL_VPORT(esw, vport))
		return -EINVAL;

	mutex_lock(&esw->state_lock);
	evport = &esw->vports[vport];

	err = mlx5_modify_vport_admin_state(esw->dev,
					    MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
					    vport, link_state);
	if (err) {
		mlx5_core_warn(esw->dev,
			       "Failed to set vport %d link state, err = %d",
			       vport, err);
		goto unlock;
	}

	evport->info.link_state = link_state;

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
				  int vport, struct ifla_vf_info *ivi)
{
	struct mlx5_vport *evport;

	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (!LEGAL_VPORT(esw, vport))
		return -EINVAL;

	evport = &esw->vports[vport];

	memset(ivi, 0, sizeof(*ivi));
	ivi->vf = vport - 1;

	mutex_lock(&esw->state_lock);
	ether_addr_copy(ivi->mac, evport->info.mac);
	ivi->linkstate = evport->info.link_state;
	ivi->vlan = evport->info.vlan;
	ivi->qos = evport->info.qos;
	ivi->spoofchk = evport->info.spoofchk;
	ivi->trusted = evport->info.trusted;
	mutex_unlock(&esw->state_lock);

	return 0;
}

int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				  int vport, u16 vlan, u8 qos, u8 set_flags)
{
	struct mlx5_vport *evport;
	int err = 0;

	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (!LEGAL_VPORT(esw, vport) || (vlan > 4095) || (qos > 7))
		return -EINVAL;

	mutex_lock(&esw->state_lock);
	evport = &esw->vports[vport];

	err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
	if (err)
		goto unlock;

	evport->info.vlan = vlan;
	evport->info.qos = qos;
	if (evport->enabled && esw->mode == SRIOV_LEGACY) {
		err = esw_vport_ingress_config(esw, evport);
		if (err)
			goto unlock;
		err = esw_vport_egress_config(esw, evport);
	}

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				int vport, u16 vlan, u8 qos)
{
	u8 set_flags = 0;

	if (vlan || qos)
		set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;

	return __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);
}

int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
				    int vport, bool spoofchk)
{
	struct mlx5_vport *evport;
	bool pschk;
	int err = 0;

	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (!LEGAL_VPORT(esw, vport))
		return -EINVAL;

	mutex_lock(&esw->state_lock);
	evport = &esw->vports[vport];
	pschk = evport->info.spoofchk;
	evport->info.spoofchk = spoofchk;
	if (evport->enabled && esw->mode == SRIOV_LEGACY)
		err = esw_vport_ingress_config(esw, evport);
	if (err)
		evport->info.spoofchk = pschk;
	mutex_unlock(&esw->state_lock);

	return err;
}

int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
				 int vport, bool setting)
{
	struct mlx5_vport *evport;

	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (!LEGAL_VPORT(esw, vport))
		return -EINVAL;

	mutex_lock(&esw->state_lock);
	evport = &esw->vports[vport];
	evport->info.trusted = setting;
	if (evport->enabled)
		esw_vport_change_handle_locked(evport);
	mutex_unlock(&esw->state_lock);

	return 0;
}

int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
				 int vport,
				 struct ifla_vf_stats *vf_stats)
{
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
	int err = 0;
	u32 *out;

	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (!LEGAL_VPORT(esw, vport))
		return -EINVAL;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
	MLX5_SET(query_vport_counter_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_vport_counter_in, in, other_vport, 1);

	memset(out, 0, outlen);
	err = mlx5_cmd_exec(esw->dev, in, sizeof(in), out, outlen);
	if (err)
		goto free_out;

#define MLX5_GET_CTR(p, x) \
	MLX5_GET64(query_vport_counter_out, p, x)

	memset(vf_stats, 0, sizeof(*vf_stats));
	vf_stats->rx_packets =
		MLX5_GET_CTR(out, received_eth_unicast.packets) +
		MLX5_GET_CTR(out, received_eth_multicast.packets) +
		MLX5_GET_CTR(out, received_eth_broadcast.packets);

	vf_stats->rx_bytes =
		MLX5_GET_CTR(out, received_eth_unicast.octets) +
		MLX5_GET_CTR(out, received_eth_multicast.octets) +
		MLX5_GET_CTR(out, received_eth_broadcast.octets);

	vf_stats->tx_packets =
		MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
		MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
		MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);

	vf_stats->tx_bytes =
		MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
		MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
		MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);

	vf_stats->multicast =
		MLX5_GET_CTR(out, received_eth_multicast.packets);

	vf_stats->broadcast =
		MLX5_GET_CTR(out, received_eth_broadcast.packets);

free_out:
	kvfree(out);
	return err;
}