/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
#include "en.h"

#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)

enum {
	MLX5E_FULLMATCH = 0,
	MLX5E_ALLMULTI  = 1,
	MLX5E_PROMISC   = 2,
};

enum {
	MLX5E_UC       = 0,
	MLX5E_MC_IPV4  = 1,
	MLX5E_MC_IPV6  = 2,
	MLX5E_MC_OTHER = 3,
};

enum {
	MLX5E_ACTION_NONE = 0,
	MLX5E_ACTION_ADD  = 1,
	MLX5E_ACTION_DEL  = 2,
};

struct mlx5e_eth_addr_hash_node {
	struct hlist_node          hlist;
	u8                         action;
	struct mlx5e_eth_addr_info ai;
};

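/* The L2 address hash below keys on the last octet of the MAC address;
 * colliding entries are chained on the per-bucket hlist. Re-adding an
 * existing address downgrades its action to NONE, so the periodic sync
 * only installs and removes rules for addresses that actually changed.
 */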
static inline int mlx5e_hash_eth_addr(u8 *addr)
{
	return addr[5];
}

static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr)
{
	struct mlx5e_eth_addr_hash_node *hn;
	int ix = mlx5e_hash_eth_addr(addr);
	int found = 0;

	hlist_for_each_entry(hn, &hash[ix], hlist)
		if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
			found = 1;
			break;
		}

	if (found) {
		hn->action = MLX5E_ACTION_NONE;
		return;
	}

	hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
	if (!hn)
		return;

	ether_addr_copy(hn->ai.addr, addr);
	hn->action = MLX5E_ACTION_ADD;

	hlist_add_head(&hn->hlist, &hash[ix]);
}

static void mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
{
	hlist_del(&hn->hlist);
	kfree(hn);
}

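/* Remove every flow rule recorded in ai->tt_vec, most specific traffic
 * types first (IPsec ESP/AH, then TCP/UDP, then plain IPv4/IPv6, then
 * the catch-all entry).
 */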
static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
					       struct mlx5e_eth_addr_info *ai)
{
	if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]);

	if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]);

	if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]);

	if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]);

	if (ai->tt_vec & BIT(MLX5E_TT_IPV6_TCP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_TCP]);

	if (ai->tt_vec & BIT(MLX5E_TT_IPV4_TCP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_TCP]);

	if (ai->tt_vec & BIT(MLX5E_TT_IPV6_UDP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_UDP]);

	if (ai->tt_vec & BIT(MLX5E_TT_IPV4_UDP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_UDP]);

	if (ai->tt_vec & BIT(MLX5E_TT_IPV6))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6]);

	if (ai->tt_vec & BIT(MLX5E_TT_IPV4))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4]);

	if (ai->tt_vec & BIT(MLX5E_TT_ANY))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_ANY]);
}

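/* Classify a MAC address as unicast, IPv4 multicast (01:00:5e with the
 * high bit of the fourth octet clear), IPv6 multicast (33:33 prefix),
 * or other multicast.
 */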
static int mlx5e_get_eth_addr_type(u8 *addr)
{
	if (is_unicast_ether_addr(addr))
		return MLX5E_UC;

	if ((addr[0] == 0x01) &&
	    (addr[1] == 0x00) &&
	    (addr[2] == 0x5e) &&
	    !(addr[3] & 0x80))
		return MLX5E_MC_IPV4;

	if ((addr[0] == 0x33) &&
	    (addr[1] == 0x33))
		return MLX5E_MC_IPV6;

	return MLX5E_MC_OTHER;
}

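/* Return the set of traffic types (MLX5E_TT_* bits) that need a flow
 * rule for this entry: a full-match unicast address steers every
 * traffic type to its per-TT TIR, IPv4/IPv6 multicast addresses only
 * need the UDP and plain-IP types of their family, and allmulti and
 * promiscuous entries get correspondingly wider masks.
 */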
static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
{
	int eth_addr_type;
	u32 ret;

	switch (type) {
	case MLX5E_FULLMATCH:
		eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
		switch (eth_addr_type) {
		case MLX5E_UC:
			ret =
				BIT(MLX5E_TT_IPV4_TCP)       |
				BIT(MLX5E_TT_IPV6_TCP)       |
				BIT(MLX5E_TT_IPV4_UDP)       |
				BIT(MLX5E_TT_IPV6_UDP)       |
				BIT(MLX5E_TT_IPV4_IPSEC_AH)  |
				BIT(MLX5E_TT_IPV6_IPSEC_AH)  |
				BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
				BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
				BIT(MLX5E_TT_IPV4)           |
				BIT(MLX5E_TT_IPV6)           |
				BIT(MLX5E_TT_ANY)            |
				0;
			break;

		case MLX5E_MC_IPV4:
			ret =
				BIT(MLX5E_TT_IPV4_UDP) |
				BIT(MLX5E_TT_IPV4)     |
				0;
			break;

		case MLX5E_MC_IPV6:
			ret =
				BIT(MLX5E_TT_IPV6_UDP) |
				BIT(MLX5E_TT_IPV6)     |
				0;
			break;

		case MLX5E_MC_OTHER:
			ret =
				BIT(MLX5E_TT_ANY) |
				0;
			break;
		}

		break;

	case MLX5E_ALLMULTI:
		ret =
			BIT(MLX5E_TT_IPV4_UDP) |
			BIT(MLX5E_TT_IPV6_UDP) |
			BIT(MLX5E_TT_IPV4)     |
			BIT(MLX5E_TT_IPV6)     |
			BIT(MLX5E_TT_ANY)      |
			0;
		break;

	default: /* MLX5E_PROMISC */
		ret =
			BIT(MLX5E_TT_IPV4_TCP)       |
			BIT(MLX5E_TT_IPV6_TCP)       |
			BIT(MLX5E_TT_IPV4_UDP)       |
			BIT(MLX5E_TT_IPV6_UDP)       |
			BIT(MLX5E_TT_IPV4_IPSEC_AH)  |
			BIT(MLX5E_TT_IPV6_IPSEC_AH)  |
			BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
			BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
			BIT(MLX5E_TT_IPV4)           |
			BIT(MLX5E_TT_IPV6)           |
			BIT(MLX5E_TT_ANY)            |
			0;
		break;
	}

	return ret;
}

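/* Install the rules selected by mlx5e_get_tt_vec() into the main flow
 * table. The match criteria/value buffers are built up incrementally:
 * the DMAC portion is set once according to the rule type, then the
 * ethertype and ip_protocol fields are (re)written before each group of
 * rules, so every mlx5_add_flow_rule() call reuses the earlier setup.
 * On failure the rules added so far are torn down via err_del_ai.
 */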
static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
				     struct mlx5e_eth_addr_info *ai,
				     int type, u32 *mc, u32 *mv)
{
	struct mlx5_flow_destination dest;
	u8 match_criteria_enable = 0;
	struct mlx5_flow_rule **rule_p;
	struct mlx5_flow_table *ft = priv->fts.main.t;
	u8 *mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
				   outer_headers.dmac_47_16);
	u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, mv,
				   outer_headers.dmac_47_16);
	u32 *tirn = priv->tirn;
	u32 tt_vec;
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;

	switch (type) {
	case MLX5E_FULLMATCH:
		match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		eth_broadcast_addr(mc_dmac);
		ether_addr_copy(mv_dmac, ai->addr);
		break;

	case MLX5E_ALLMULTI:
		match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		mc_dmac[0] = 0x01;
		mv_dmac[0] = 0x01;
		break;

	case MLX5E_PROMISC:
		break;
	}

	tt_vec = mlx5e_get_tt_vec(ai, type);

	if (tt_vec & BIT(MLX5E_TT_ANY)) {
		rule_p = &ai->ft_rule[MLX5E_TT_ANY];
		dest.tir_num = tirn[MLX5E_TT_ANY];
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_ANY);
	}

	match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);

	if (tt_vec & BIT(MLX5E_TT_IPV4)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4];
		dest.tir_num = tirn[MLX5E_TT_IPV4];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETH_P_IP);
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6];
		dest.tir_num = tirn[MLX5E_TT_IPV6];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETH_P_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6);
	}

	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP);

	if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_UDP];
		dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETH_P_IP);
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_UDP];
		dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETH_P_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP);
	}

	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_TCP);

	if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_TCP];
		dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETH_P_IP);
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_TCP];
		dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETH_P_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP);
	}

	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_AH);

	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH];
		dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETH_P_IP);
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH];
		dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETH_P_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH);
	}

	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_ESP);

	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP];
		dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETH_P_IP);
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP];
		dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETH_P_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP);
	}

	return 0;

err_del_ai:
	err = PTR_ERR(*rule_p);
	*rule_p = NULL;
	mlx5e_del_eth_addr_from_flow_table(priv, ai);

	return err;
}

static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
				   struct mlx5e_eth_addr_info *ai, int type)
{
	u32 *match_criteria;
	u32 *match_value;
	int err = 0;

	match_value    = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (!match_value || !match_criteria) {
		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
		err = -ENOMEM;
		goto add_eth_addr_rule_out;
	}

	err = __mlx5e_add_eth_addr_rule(priv, ai, type, match_criteria,
					match_value);

add_eth_addr_rule_out:
	kvfree(match_criteria);
	kvfree(match_value);

	return err;
}

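/* Mirror the active VLAN set into the NIC vport context, presumably so
 * the device can filter on behalf of this vport (e.g. under an eswitch).
 * If the list exceeds the firmware limit (log_max_vlan_list), it is
 * truncated and a warning is logged.
 */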
static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
{
	struct net_device *ndev = priv->netdev;
	int max_list_size;
	int list_size;
	u16 *vlans;
	int vlan;
	int err;
	int i;

	list_size = 0;
	for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID)
		list_size++;

	max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);

	if (list_size > max_list_size) {
		netdev_warn(ndev,
			    "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
			    list_size, max_list_size);
		list_size = max_list_size;
	}

	vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
	if (!vlans)
		return -ENOMEM;

	i = 0;
	for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) {
		if (i >= list_size)
			break;
		vlans[i++] = vlan;
	}

	err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
	if (err)
		netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n",
			   err);

	kfree(vlans);
	return err;
}

enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,
	MLX5E_VLAN_RULE_TYPE_ANY_VID,
	MLX5E_VLAN_RULE_TYPE_MATCH_VID,
};

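/* Add a rule to the VLAN flow table that forwards matching traffic to
 * the main flow table: the untagged entry, the any-VID wildcard used
 * while VLAN filtering is off, or an exact first_vid match.
 */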
static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
				 enum mlx5e_vlan_rule_type rule_type,
				 u16 vid, u32 *mc, u32 *mv)
{
	struct mlx5_flow_table *ft = priv->fts.vlan.t;
	struct mlx5_flow_destination dest;
	u8 match_criteria_enable = 0;
	struct mlx5_flow_rule **rule_p;
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fts.main.t;

	match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);

	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		rule_p = &priv->vlan.untagged_rule;
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_VID:
		rule_p = &priv->vlan.any_vlan_rule;
		MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1);
		break;
	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
		rule_p = &priv->vlan.active_vlans_rule[vid];
		MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
		MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid);
		break;
	}

	*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
				     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
				     MLX5_FS_DEFAULT_FLOW_TAG,
				     &dest);

	if (IS_ERR(*rule_p)) {
		err = PTR_ERR(*rule_p);
		*rule_p = NULL;
		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
	}

	return err;
}

static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
			       enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	u32 *match_criteria;
	u32 *match_value;
	int err = 0;

	match_value    = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (!match_value || !match_criteria) {
		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
		err = -ENOMEM;
		goto add_vlan_rule_out;
	}

	if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_VID)
		mlx5e_vport_context_update_vlans(priv);

	err = __mlx5e_add_vlan_rule(priv, rule_type, vid, match_criteria,
				    match_value);

add_vlan_rule_out:
	kvfree(match_criteria);
	kvfree(match_value);

	return err;
}

static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
				enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		if (priv->vlan.untagged_rule) {
			mlx5_del_flow_rule(priv->vlan.untagged_rule);
			priv->vlan.untagged_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_VID:
		if (priv->vlan.any_vlan_rule) {
			mlx5_del_flow_rule(priv->vlan.any_vlan_rule);
			priv->vlan.any_vlan_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
		mlx5e_vport_context_update_vlans(priv);
		if (priv->vlan.active_vlans_rule[vid]) {
			mlx5_del_flow_rule(priv->vlan.active_vlans_rule[vid]);
			priv->vlan.active_vlans_rule[vid] = NULL;
		}
		mlx5e_vport_context_update_vlans(priv);
		break;
	}
}

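/* VLAN filtering is implemented by toggling the any-VID rule: disabling
 * the filter installs it so all tagged traffic passes, enabling the
 * filter removes it. While the netdev is promiscuous the rule is owned
 * by mlx5e_set_rx_mode_work(), so it is left untouched here.
 */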
void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
{
	if (!priv->vlan.filter_disabled)
		return;

	priv->vlan.filter_disabled = false;
	if (priv->netdev->flags & IFF_PROMISC)
		return;
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
}

void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
{
	if (priv->vlan.filter_disabled)
		return;

	priv->vlan.filter_disabled = true;
	if (priv->netdev->flags & IFF_PROMISC)
		return;
	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
}

int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
			  u16 vid)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	set_bit(vid, priv->vlan.active_vlans);

	return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
}

int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
			   u16 vid)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	clear_bit(vid, priv->vlan.active_vlans);

	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);

	return 0;
}

#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)

static void mlx5e_execute_action(struct mlx5e_priv *priv,
				 struct mlx5e_eth_addr_hash_node *hn)
{
	switch (hn->action) {
	case MLX5E_ACTION_ADD:
		mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
		hn->action = MLX5E_ACTION_NONE;
		break;

	case MLX5E_ACTION_DEL:
		mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
		mlx5e_del_eth_addr_from_hash(hn);
		break;
	}
}

static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(netdev);

	mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc,
				   priv->netdev->dev_addr);

	netdev_for_each_uc_addr(ha, netdev)
		mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, ha->addr);

	netdev_for_each_mc_addr(ha, netdev)
		mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_mc, ha->addr);

	netif_addr_unlock_bh(netdev);
}

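/* Flatten the UC or MC address hash into a contiguous array for the
 * vport context command. The device's own MAC (UC) or the broadcast
 * address (MC, when enabled) goes first; the device MAC is skipped if
 * it also appears in the hash.
 */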
static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
				  u8 addr_array[][ETH_ALEN], int size)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct net_device *ndev = priv->netdev;
	struct mlx5e_eth_addr_hash_node *hn;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int i = 0;
	int hi;

	addr_list = is_uc ? priv->eth_addr.netdev_uc : priv->eth_addr.netdev_mc;

	if (is_uc) /* Make sure our own address is pushed first */
		ether_addr_copy(addr_array[i++], ndev->dev_addr);
	else if (priv->eth_addr.broadcast_enabled)
		ether_addr_copy(addr_array[i++], ndev->broadcast);

	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
		if (ether_addr_equal(ndev->dev_addr, hn->ai.addr))
			continue;
		if (i >= size)
			break;
		ether_addr_copy(addr_array[i++], hn->ai.addr);
	}
}

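/* Push the UC or MC address list into the NIC vport context, truncating
 * to the firmware limit (log_max_current_uc_list/log_max_current_mc_list)
 * with a warning when the netdev list is larger.
 */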
static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
						 int list_type)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct mlx5e_eth_addr_hash_node *hn;
	u8 (*addr_array)[ETH_ALEN] = NULL;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int max_size;
	int size;
	int err;
	int hi;

	size = is_uc ? 0 : (priv->eth_addr.broadcast_enabled ? 1 : 0);
	max_size = is_uc ?
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);

	addr_list = is_uc ? priv->eth_addr.netdev_uc : priv->eth_addr.netdev_mc;
	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
		size++;

	if (size > max_size) {
		netdev_warn(priv->netdev,
			    "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
			    is_uc ? "UC" : "MC", size, max_size);
		size = max_size;
	}

	if (size) {
		addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
		if (!addr_array) {
			err = -ENOMEM;
			goto out;
		}
		mlx5e_fill_addr_array(priv, list_type, addr_array, size);
	}

	err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
out:
	if (err)
		netdev_err(priv->netdev,
			   "Failed to modify vport %s list err(%d)\n",
			   is_uc ? "UC" : "MC", err);
	kfree(addr_array);
}

static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;

	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
	mlx5_modify_nic_vport_promisc(priv->mdev, 0,
				      ea->allmulti_enabled,
				      ea->promisc_enabled);
}

static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
		mlx5e_execute_action(priv, hn);

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
		mlx5e_execute_action(priv, hn);
}

static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
		hn->action = MLX5E_ACTION_DEL;
	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
		hn->action = MLX5E_ACTION_DEL;

	if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
		mlx5e_sync_netdev_addr(priv);

	mlx5e_apply_netdev_addr(priv);
}

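/* Worker for priv->set_rx_mode_work: derive the desired promiscuous,
 * allmulti and broadcast state from the netdev flags (everything off
 * once MLX5E_STATE_DESTROYING is set), apply only the deltas as flow
 * rules, reconcile the UC/MC address hashes, and finally update the
 * vport context to match.
 */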
void mlx5e_set_rx_mode_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       set_rx_mode_work);

	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
	struct net_device *ndev = priv->netdev;

	bool rx_mode_enable   = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
	bool promisc_enabled   = rx_mode_enable && (ndev->flags & IFF_PROMISC);
	bool allmulti_enabled  = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
	bool broadcast_enabled = rx_mode_enable;

	bool enable_promisc    = !ea->promisc_enabled   &&  promisc_enabled;
	bool disable_promisc   =  ea->promisc_enabled   && !promisc_enabled;
	bool enable_allmulti   = !ea->allmulti_enabled  &&  allmulti_enabled;
	bool disable_allmulti  =  ea->allmulti_enabled  && !allmulti_enabled;
	bool enable_broadcast  = !ea->broadcast_enabled &&  broadcast_enabled;
	bool disable_broadcast =  ea->broadcast_enabled && !broadcast_enabled;

	if (enable_promisc) {
		mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
		if (!priv->vlan.filter_disabled)
			mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
					    0);
	}
	if (enable_allmulti)
		mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
	if (enable_broadcast)
		mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

	mlx5e_handle_netdev_addr(priv);

	if (disable_broadcast)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
	if (disable_allmulti)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
	if (disable_promisc) {
		if (!priv->vlan.filter_disabled)
			mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
					    0);
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
	}

	ea->promisc_enabled   = promisc_enabled;
	ea->allmulti_enabled  = allmulti_enabled;
	ea->broadcast_enabled = broadcast_enabled;

	mlx5e_vport_context_update(priv);
}

static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
{
	int i;

	for (i = ft->num_groups - 1; i >= 0; i--) {
		if (!IS_ERR_OR_NULL(ft->g[i]))
			mlx5_destroy_flow_group(ft->g[i]);
		ft->g[i] = NULL;
	}
	ft->num_groups = 0;
}

void mlx5e_init_eth_addr(struct mlx5e_priv *priv)
{
	ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast);
}

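/* The main flow table is carved into nine groups, one per match
 * criteria combination used by __mlx5e_add_eth_addr_rule(): groups 0-2
 * match {ethertype + ip_protocol, ethertype, nothing} with no DMAC
 * (promiscuous rules), groups 3-5 repeat that with a full DMAC match
 * (unicast full-match rules), and groups 6-8 repeat it matching only
 * the multicast bit of the DMAC (allmulti rules).
 */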
#define MLX5E_MAIN_GROUP0_SIZE	BIT(3)
#define MLX5E_MAIN_GROUP1_SIZE	BIT(1)
#define MLX5E_MAIN_GROUP2_SIZE	BIT(0)
#define MLX5E_MAIN_GROUP3_SIZE	BIT(14)
#define MLX5E_MAIN_GROUP4_SIZE	BIT(13)
#define MLX5E_MAIN_GROUP5_SIZE	BIT(11)
#define MLX5E_MAIN_GROUP6_SIZE	BIT(2)
#define MLX5E_MAIN_GROUP7_SIZE	BIT(1)
#define MLX5E_MAIN_GROUP8_SIZE	BIT(0)
#define MLX5E_MAIN_TABLE_SIZE	(MLX5E_MAIN_GROUP0_SIZE +\
				 MLX5E_MAIN_GROUP1_SIZE +\
				 MLX5E_MAIN_GROUP2_SIZE +\
				 MLX5E_MAIN_GROUP3_SIZE +\
				 MLX5E_MAIN_GROUP4_SIZE +\
				 MLX5E_MAIN_GROUP5_SIZE +\
				 MLX5E_MAIN_GROUP6_SIZE +\
				 MLX5E_MAIN_GROUP7_SIZE +\
				 MLX5E_MAIN_GROUP8_SIZE)

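/* Create the nine groups declared above in table order, most specific
 * match criteria first, advancing ix so each group owns its reserved
 * range of flow indexes. Any mlx5_create_flow_group() failure unwinds
 * the groups created so far.
 */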
static int __mlx5e_create_main_groups(struct mlx5e_flow_table *ft, u32 *in,
				      int inlen)
{
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	u8 *dmac = MLX5_ADDR_OF(create_flow_group_in, in,
				match_criteria.outer_headers.dmac_47_16);
	int err;
	int ix = 0;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	eth_broadcast_addr(dmac);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	eth_broadcast_addr(dmac);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP4_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	eth_broadcast_addr(dmac);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP5_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP6_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP7_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP8_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return 0;

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return err;
}

static int mlx5e_create_main_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	err = __mlx5e_create_main_groups(ft, in, inlen);

	kvfree(in);
	return err;
}

static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fts.main;
	int err;

	ft->num_groups = 0;
	ft->t = mlx5_create_flow_table(priv->fts.ns, 1, MLX5E_MAIN_TABLE_SIZE);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}
	ft->g = kcalloc(MLX5E_NUM_MAIN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_main_flow_table;
	}

	err = mlx5e_create_main_groups(ft);
	if (err)
		goto err_free_g;
	return 0;

err_free_g:
	kfree(ft->g);

err_destroy_main_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return err;
}

static void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
	mlx5e_destroy_groups(ft);
	kfree(ft->g);
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;
}

static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fts.main);
}

#define MLX5E_NUM_VLAN_GROUPS	2
#define MLX5E_VLAN_GROUP0_SIZE	BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE	BIT(1)
#define MLX5E_VLAN_TABLE_SIZE	(MLX5E_VLAN_GROUP0_SIZE +\
				 MLX5E_VLAN_GROUP1_SIZE)

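/* Two VLAN groups: group 0 matches vlan_tag + first_vid and holds one
 * entry per possible VID; group 1 matches vlan_tag alone and holds the
 * untagged and any-VID rules.
 */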
static int __mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft, u32 *in,
				      int inlen)
{
	int err;
	int ix = 0;
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return 0;

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return err;
}

static int mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	err = __mlx5e_create_vlan_groups(ft, in, inlen);

	kvfree(in);
	return err;
}

static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fts.vlan;
	int err;

	ft->num_groups = 0;
	ft->t = mlx5_create_flow_table(priv->fts.ns, 1, MLX5E_VLAN_TABLE_SIZE);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}
	ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_vlan_flow_table;
	}

	err = mlx5e_create_vlan_groups(ft);
	if (err)
		goto err_free_g;

	return 0;

err_free_g:
	kfree(ft->g);

err_destroy_vlan_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return err;
}

static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fts.vlan);
}

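/* Build the RX steering state for the netdev: resolve the kernel flow
 * namespace, create the VLAN table (whose rules forward into the main
 * table) and the main table, then install the default untagged rule so
 * untagged traffic is steered before any VLAN is configured.
 */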
int mlx5e_create_flow_tables(struct mlx5e_priv *priv)
{
	int err;

	priv->fts.ns = mlx5_get_flow_namespace(priv->mdev,
					       MLX5_FLOW_NAMESPACE_KERNEL);

	if (!priv->fts.ns)
		return -EINVAL;

	err = mlx5e_create_vlan_flow_table(priv);
	if (err)
		return err;

	err = mlx5e_create_main_flow_table(priv);
	if (err)
		goto err_destroy_vlan_flow_table;

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
	if (err)
		goto err_destroy_main_flow_table;

	return 0;

err_destroy_main_flow_table:
	mlx5e_destroy_main_flow_table(priv);
err_destroy_vlan_flow_table:
	mlx5e_destroy_vlan_flow_table(priv);

	return err;
}

void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
	mlx5e_destroy_main_flow_table(priv);
	mlx5e_destroy_vlan_flow_table(priv);
}