/* drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
 * (mirrored from the mirror_ubuntu-zesty-kernel tree)
 */
1 /*
2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <linux/list.h>
34 #include <linux/ip.h>
35 #include <linux/ipv6.h>
36 #include <linux/tcp.h>
37 #include <linux/mlx5/flow_table.h>
38 #include "en.h"
39
/* L2 steering rule match type, see __mlx5e_add_eth_addr_rule(). */
enum {
	MLX5E_FULLMATCH = 0, /* match the full destination MAC */
	MLX5E_ALLMULTI = 1, /* match only the multicast bit of the DMAC */
	MLX5E_PROMISC = 2, /* no DMAC criteria - match everything */
};
45
/* Ethernet address class, returned by mlx5e_get_eth_addr_type(). */
enum {
	MLX5E_UC = 0, /* unicast */
	MLX5E_MC_IPV4 = 1, /* IPv4 multicast (01:00:5e with low mcast range) */
	MLX5E_MC_IPV6 = 2, /* IPv6 multicast (33:33:...) */
	MLX5E_MC_OTHER = 3, /* any other multicast/broadcast */
};
52
/* Pending sync action for a hashed address, consumed by
 * mlx5e_execute_action().
 */
enum {
	MLX5E_ACTION_NONE = 0, /* address already in sync with HW */
	MLX5E_ACTION_ADD = 1, /* install steering rule for this address */
	MLX5E_ACTION_DEL = 2, /* remove rule and drop the hash node */
};
58
/* One tracked netdev MAC address plus the action to apply on the next
 * rx-mode sync.
 */
struct mlx5e_eth_addr_hash_node {
	struct hlist_node hlist; /* linkage in the netdev_uc/netdev_mc buckets */
	u8 action; /* MLX5E_ACTION_* */
	struct mlx5e_eth_addr_info ai; /* address and its installed ft entries */
};
64
65 static inline int mlx5e_hash_eth_addr(u8 *addr)
66 {
67 return addr[5];
68 }
69
70 static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr)
71 {
72 struct mlx5e_eth_addr_hash_node *hn;
73 int ix = mlx5e_hash_eth_addr(addr);
74 int found = 0;
75
76 hlist_for_each_entry(hn, &hash[ix], hlist)
77 if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
78 found = 1;
79 break;
80 }
81
82 if (found) {
83 hn->action = MLX5E_ACTION_NONE;
84 return;
85 }
86
87 hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
88 if (!hn)
89 return;
90
91 ether_addr_copy(hn->ai.addr, addr);
92 hn->action = MLX5E_ACTION_ADD;
93
94 hlist_add_head(&hn->hlist, &hash[ix]);
95 }
96
/* Unlink @hn from its hash bucket and free it. */
static void mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
{
	hlist_del(&hn->hlist);
	kfree(hn);
}
102
103 static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
104 struct mlx5e_eth_addr_info *ai)
105 {
106 void *ft = priv->ft.main;
107
108 if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP))
109 mlx5_del_flow_table_entry(ft,
110 ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP]);
111
112 if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP))
113 mlx5_del_flow_table_entry(ft,
114 ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP]);
115
116 if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH))
117 mlx5_del_flow_table_entry(ft,
118 ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH]);
119
120 if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH))
121 mlx5_del_flow_table_entry(ft,
122 ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH]);
123
124 if (ai->tt_vec & BIT(MLX5E_TT_IPV6_TCP))
125 mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_TCP]);
126
127 if (ai->tt_vec & BIT(MLX5E_TT_IPV4_TCP))
128 mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_TCP]);
129
130 if (ai->tt_vec & BIT(MLX5E_TT_IPV6_UDP))
131 mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_UDP]);
132
133 if (ai->tt_vec & BIT(MLX5E_TT_IPV4_UDP))
134 mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_UDP]);
135
136 if (ai->tt_vec & BIT(MLX5E_TT_IPV6))
137 mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6]);
138
139 if (ai->tt_vec & BIT(MLX5E_TT_IPV4))
140 mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4]);
141
142 if (ai->tt_vec & BIT(MLX5E_TT_ANY))
143 mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_ANY]);
144 }
145
146 static int mlx5e_get_eth_addr_type(u8 *addr)
147 {
148 if (is_unicast_ether_addr(addr))
149 return MLX5E_UC;
150
151 if ((addr[0] == 0x01) &&
152 (addr[1] == 0x00) &&
153 (addr[2] == 0x5e) &&
154 !(addr[3] & 0x80))
155 return MLX5E_MC_IPV4;
156
157 if ((addr[0] == 0x33) &&
158 (addr[1] == 0x33))
159 return MLX5E_MC_IPV6;
160
161 return MLX5E_MC_OTHER;
162 }
163
164 static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
165 {
166 int eth_addr_type;
167 u32 ret;
168
169 switch (type) {
170 case MLX5E_FULLMATCH:
171 eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
172 switch (eth_addr_type) {
173 case MLX5E_UC:
174 ret =
175 BIT(MLX5E_TT_IPV4_TCP) |
176 BIT(MLX5E_TT_IPV6_TCP) |
177 BIT(MLX5E_TT_IPV4_UDP) |
178 BIT(MLX5E_TT_IPV6_UDP) |
179 BIT(MLX5E_TT_IPV4_IPSEC_AH) |
180 BIT(MLX5E_TT_IPV6_IPSEC_AH) |
181 BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
182 BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
183 BIT(MLX5E_TT_IPV4) |
184 BIT(MLX5E_TT_IPV6) |
185 BIT(MLX5E_TT_ANY) |
186 0;
187 break;
188
189 case MLX5E_MC_IPV4:
190 ret =
191 BIT(MLX5E_TT_IPV4_UDP) |
192 BIT(MLX5E_TT_IPV4) |
193 0;
194 break;
195
196 case MLX5E_MC_IPV6:
197 ret =
198 BIT(MLX5E_TT_IPV6_UDP) |
199 BIT(MLX5E_TT_IPV6) |
200 0;
201 break;
202
203 case MLX5E_MC_OTHER:
204 ret =
205 BIT(MLX5E_TT_ANY) |
206 0;
207 break;
208 }
209
210 break;
211
212 case MLX5E_ALLMULTI:
213 ret =
214 BIT(MLX5E_TT_IPV4_UDP) |
215 BIT(MLX5E_TT_IPV6_UDP) |
216 BIT(MLX5E_TT_IPV4) |
217 BIT(MLX5E_TT_IPV6) |
218 BIT(MLX5E_TT_ANY) |
219 0;
220 break;
221
222 default: /* MLX5E_PROMISC */
223 ret =
224 BIT(MLX5E_TT_IPV4_TCP) |
225 BIT(MLX5E_TT_IPV6_TCP) |
226 BIT(MLX5E_TT_IPV4_UDP) |
227 BIT(MLX5E_TT_IPV6_UDP) |
228 BIT(MLX5E_TT_IPV4_IPSEC_AH) |
229 BIT(MLX5E_TT_IPV6_IPSEC_AH) |
230 BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
231 BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
232 BIT(MLX5E_TT_IPV4) |
233 BIT(MLX5E_TT_IPV6) |
234 BIT(MLX5E_TT_ANY) |
235 0;
236 break;
237 }
238
239 return ret;
240 }
241
/* Install the main-flow-table entries backing one L2 steering rule.
 *
 * Builds match criteria/value for @ai->addr according to @type
 * (MLX5E_FULLMATCH / MLX5E_ALLMULTI / MLX5E_PROMISC), then adds one
 * entry per traffic type in the rule's tt_vec, each forwarding to the
 * corresponding TIR.  The match buffers are filled *incrementally*:
 * later sections reuse the ethertype/ip_protocol fields set by earlier
 * ones, so the order of the sections below is significant.
 *
 * @flow_context and @match_criteria are caller-allocated, zeroed
 * command buffers.  Entry indices are recorded in ai->ft_ix[] and the
 * installed set in ai->tt_vec.  On any failure, everything installed so
 * far is rolled back.
 *
 * Returns 0 on success or a negative errno from
 * mlx5_add_flow_table_entry().
 */
static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
				     struct mlx5e_eth_addr_info *ai, int type,
				     void *flow_context, void *match_criteria)
{
	u8 match_criteria_enable = 0;
	void *match_value;
	void *dest;
	u8 *dmac;
	u8 *match_criteria_dmac;
	void *ft = priv->ft.main;
	u32 *tirn = priv->tirn;
	u32 *ft_ix;
	u32 tt_vec;
	int err;

	match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
	dmac = MLX5_ADDR_OF(fte_match_param, match_value,
			    outer_headers.dmac_47_16);
	match_criteria_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
					   outer_headers.dmac_47_16);
	dest = MLX5_ADDR_OF(flow_context, flow_context, destination);

	/* Every entry forwards to a single TIR; only destination_id
	 * changes per traffic type below.
	 */
	MLX5_SET(flow_context, flow_context, action,
		 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
	MLX5_SET(flow_context, flow_context, destination_list_size, 1);
	MLX5_SET(dest_format_struct, dest, destination_type,
		 MLX5_FLOW_CONTEXT_DEST_TYPE_TIR);

	switch (type) {
	case MLX5E_FULLMATCH:
		/* match the full 48-bit destination MAC */
		match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		memset(match_criteria_dmac, 0xff, ETH_ALEN);
		ether_addr_copy(dmac, ai->addr);
		break;

	case MLX5E_ALLMULTI:
		/* match only the multicast bit of the DMAC */
		match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		match_criteria_dmac[0] = 0x01;
		dmac[0] = 0x01;
		break;

	case MLX5E_PROMISC:
		/* no DMAC criteria at all */
		break;
	}

	tt_vec = mlx5e_get_tt_vec(ai, type);

	/* MLX5E_TT_ANY: added before any ethertype/ip_protocol criteria
	 * are enabled
	 */
	ft_ix = &ai->ft_ix[MLX5E_TT_ANY];
	if (tt_vec & BIT(MLX5E_TT_ANY)) {
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_ANY]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_ANY);
	}

	/* all remaining entries also match on the ethertype */
	match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 outer_headers.ethertype);

	ft_ix = &ai->ft_ix[MLX5E_TT_IPV4];
	if (tt_vec & BIT(MLX5E_TT_IPV4)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IP);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV4]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV4);
	}

	ft_ix = &ai->ft_ix[MLX5E_TT_IPV6];
	if (tt_vec & BIT(MLX5E_TT_IPV6)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IPV6);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV6]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV6);
	}

	/* from here on also match on ip_protocol: first UDP... */
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
		 IPPROTO_UDP);

	ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_UDP];
	if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IP);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV4_UDP]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP);
	}

	ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_UDP];
	if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IPV6);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV6_UDP]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP);
	}

	/* ...then TCP... */
	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
		 IPPROTO_TCP);

	ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_TCP];
	if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IP);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV4_TCP]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP);
	}

	ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_TCP];
	if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IPV6);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV6_TCP]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP);
	}

	/* ...then IPsec AH... */
	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
		 IPPROTO_AH);

	ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH];
	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IP);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV4_IPSEC_AH]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH);
	}

	ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH];
	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IPV6);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV6_IPSEC_AH]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH);
	}

	/* ...and finally IPsec ESP */
	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
		 IPPROTO_ESP);

	ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP];
	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IP);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV4_IPSEC_ESP]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP);
	}

	ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP];
	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
			 ETH_P_IPV6);
		MLX5_SET(dest_format_struct, dest, destination_id,
			 tirn[MLX5E_TT_IPV6_IPSEC_ESP]);
		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
						match_criteria, flow_context,
						ft_ix);
		if (err)
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP);
	}

	return 0;

err_del_ai:
	/* roll back every entry recorded in ai->tt_vec so far */
	mlx5e_del_eth_addr_from_flow_table(priv, ai);

	return err;
}
477
478 static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
479 struct mlx5e_eth_addr_info *ai, int type)
480 {
481 u32 *flow_context;
482 u32 *match_criteria;
483 int err;
484
485 flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
486 MLX5_ST_SZ_BYTES(dest_format_struct));
487 match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
488 if (!flow_context || !match_criteria) {
489 netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
490 err = -ENOMEM;
491 goto add_eth_addr_rule_out;
492 }
493
494 err = __mlx5e_add_eth_addr_rule(priv, ai, type, flow_context,
495 match_criteria);
496 if (err)
497 netdev_err(priv->netdev, "%s: failed\n", __func__);
498
499 add_eth_addr_rule_out:
500 kvfree(match_criteria);
501 kvfree(flow_context);
502 return err;
503 }
504
/* Kind of entry in the VLAN flow table. */
enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED, /* frames with no VLAN tag */
	MLX5E_VLAN_RULE_TYPE_ANY_VID, /* tagged frames, any VLAN id */
	MLX5E_VLAN_RULE_TYPE_MATCH_VID, /* tagged frames with a specific vid */
};
510
/* Add one VLAN-flow-table entry forwarding matching traffic to the
 * main flow table.  Depending on @rule_type the entry matches untagged
 * frames, any tagged frame, or tagged frames carrying @vid.  The
 * resulting entry index is stored in priv->vlan for later removal by
 * mlx5e_del_vlan_rule().
 *
 * Returns 0 on success or a negative errno.
 */
static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
			       enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	u8 match_criteria_enable = 0;
	u32 *flow_context;
	void *match_value;
	void *dest;
	u32 *match_criteria;
	u32 *ft_ix;
	int err;

	flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
				    MLX5_ST_SZ_BYTES(dest_format_struct));
	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (!flow_context || !match_criteria) {
		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
		err = -ENOMEM;
		goto add_vlan_rule_out;
	}
	match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
	dest = MLX5_ADDR_OF(flow_context, flow_context, destination);

	/* all VLAN rules forward to the main flow table */
	MLX5_SET(flow_context, flow_context, action,
		 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
	MLX5_SET(flow_context, flow_context, destination_list_size, 1);
	MLX5_SET(dest_format_struct, dest, destination_type,
		 MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE);
	MLX5_SET(dest_format_struct, dest, destination_id,
		 mlx5_get_flow_table_id(priv->ft.main));

	/* always match on the vlan_tag bit; its match value stays 0 for
	 * the untagged rule and is set to 1 below for tagged rules
	 */
	match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 outer_headers.vlan_tag);

	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		ft_ix = &priv->vlan.untagged_rule_ft_ix;
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_VID:
		ft_ix = &priv->vlan.any_vlan_rule_ft_ix;
		MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
			 1);
		break;
	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
		ft_ix = &priv->vlan.active_vlans_ft_ix[vid];
		MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
			 1);
		/* additionally match on the exact first VLAN id */
		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, match_value, outer_headers.first_vid,
			 vid);
		break;
	}

	err = mlx5_add_flow_table_entry(priv->ft.vlan, match_criteria_enable,
					match_criteria, flow_context, ft_ix);
	if (err)
		netdev_err(priv->netdev, "%s: failed\n", __func__);

add_vlan_rule_out:
	kvfree(match_criteria);
	kvfree(flow_context);
	return err;
}
575
576 static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
577 enum mlx5e_vlan_rule_type rule_type, u16 vid)
578 {
579 switch (rule_type) {
580 case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
581 mlx5_del_flow_table_entry(priv->ft.vlan,
582 priv->vlan.untagged_rule_ft_ix);
583 break;
584 case MLX5E_VLAN_RULE_TYPE_ANY_VID:
585 mlx5_del_flow_table_entry(priv->ft.vlan,
586 priv->vlan.any_vlan_rule_ft_ix);
587 break;
588 case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
589 mlx5_del_flow_table_entry(priv->ft.vlan,
590 priv->vlan.active_vlans_ft_ix[vid]);
591 break;
592 }
593 }
594
595 void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
596 {
597 if (!priv->vlan.filter_disabled)
598 return;
599
600 priv->vlan.filter_disabled = false;
601 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
602 }
603
604 void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
605 {
606 if (priv->vlan.filter_disabled)
607 return;
608
609 priv->vlan.filter_disabled = true;
610 mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
611 }
612
613 int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
614 u16 vid)
615 {
616 struct mlx5e_priv *priv = netdev_priv(dev);
617
618 return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
619 }
620
621 int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
622 u16 vid)
623 {
624 struct mlx5e_priv *priv = netdev_priv(dev);
625
626 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
627
628 return 0;
629 }
630
/* Iterate every node of an eth-addr hash table, safe against removal of
 * the current node.  @hn: node cursor, @tmp: scratch hlist_node,
 * @hash: bucket array of MLX5E_ETH_ADDR_HASH_SIZE heads, @i: bucket index.
 */
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
634
635 static void mlx5e_execute_action(struct mlx5e_priv *priv,
636 struct mlx5e_eth_addr_hash_node *hn)
637 {
638 switch (hn->action) {
639 case MLX5E_ACTION_ADD:
640 mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
641 hn->action = MLX5E_ACTION_NONE;
642 break;
643
644 case MLX5E_ACTION_DEL:
645 mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
646 mlx5e_del_eth_addr_from_hash(hn);
647 break;
648 }
649 }
650
651 static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
652 {
653 struct net_device *netdev = priv->netdev;
654 struct netdev_hw_addr *ha;
655
656 netif_addr_lock_bh(netdev);
657
658 mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc,
659 priv->netdev->dev_addr);
660
661 netdev_for_each_uc_addr(ha, netdev)
662 mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, ha->addr);
663
664 netdev_for_each_mc_addr(ha, netdev)
665 mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_mc, ha->addr);
666
667 netif_addr_unlock_bh(netdev);
668 }
669
/* Execute the pending add/del action of every tracked unicast and
 * multicast address.
 */
static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
		mlx5e_execute_action(priv, hn);

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
		mlx5e_execute_action(priv, hn);
}
682
/* Reconcile the driver's address tables with the netdev lists: first
 * mark every tracked address for deletion, then let the sync re-mark
 * still-present addresses (skipped while the device is being destroyed
 * so that all rules are removed), and finally apply the resulting
 * add/del actions.
 */
static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
		hn->action = MLX5E_ACTION_DEL;
	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
		hn->action = MLX5E_ACTION_DEL;

	if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
		mlx5e_sync_netdev_addr(priv);

	mlx5e_apply_netdev_addr(priv);
}
699
700 void mlx5e_set_rx_mode_work(struct work_struct *work)
701 {
702 struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
703 set_rx_mode_work);
704
705 struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
706 struct net_device *ndev = priv->netdev;
707
708 bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
709 bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC);
710 bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
711 bool broadcast_enabled = rx_mode_enable;
712
713 bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
714 bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
715 bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
716 bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
717 bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
718 bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;
719
720 if (enable_promisc)
721 mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
722 if (enable_allmulti)
723 mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
724 if (enable_broadcast)
725 mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);
726
727 mlx5e_handle_netdev_addr(priv);
728
729 if (disable_broadcast)
730 mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
731 if (disable_allmulti)
732 mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
733 if (disable_promisc)
734 mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
735
736 ea->promisc_enabled = promisc_enabled;
737 ea->allmulti_enabled = allmulti_enabled;
738 ea->broadcast_enabled = broadcast_enabled;
739 }
740
/* Seed the eth-addr DB with the netdev broadcast address; the
 * corresponding steering rule is installed by mlx5e_set_rx_mode_work().
 */
void mlx5e_init_eth_addr(struct mlx5e_priv *priv)
{
	ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast);
}
745
746 static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
747 {
748 struct mlx5_flow_table_group *g;
749 u8 *dmac;
750
751 g = kcalloc(9, sizeof(*g), GFP_KERNEL);
752 if (!g)
753 return -ENOMEM;
754
755 g[0].log_sz = 3;
756 g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
757 MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
758 outer_headers.ethertype);
759 MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
760 outer_headers.ip_protocol);
761
762 g[1].log_sz = 1;
763 g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
764 MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
765 outer_headers.ethertype);
766
767 g[2].log_sz = 0;
768
769 g[3].log_sz = 14;
770 g[3].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
771 dmac = MLX5_ADDR_OF(fte_match_param, g[3].match_criteria,
772 outer_headers.dmac_47_16);
773 memset(dmac, 0xff, ETH_ALEN);
774 MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
775 outer_headers.ethertype);
776 MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
777 outer_headers.ip_protocol);
778
779 g[4].log_sz = 13;
780 g[4].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
781 dmac = MLX5_ADDR_OF(fte_match_param, g[4].match_criteria,
782 outer_headers.dmac_47_16);
783 memset(dmac, 0xff, ETH_ALEN);
784 MLX5_SET_TO_ONES(fte_match_param, g[4].match_criteria,
785 outer_headers.ethertype);
786
787 g[5].log_sz = 11;
788 g[5].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
789 dmac = MLX5_ADDR_OF(fte_match_param, g[5].match_criteria,
790 outer_headers.dmac_47_16);
791 memset(dmac, 0xff, ETH_ALEN);
792
793 g[6].log_sz = 2;
794 g[6].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
795 dmac = MLX5_ADDR_OF(fte_match_param, g[6].match_criteria,
796 outer_headers.dmac_47_16);
797 dmac[0] = 0x01;
798 MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
799 outer_headers.ethertype);
800 MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
801 outer_headers.ip_protocol);
802
803 g[7].log_sz = 1;
804 g[7].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
805 dmac = MLX5_ADDR_OF(fte_match_param, g[7].match_criteria,
806 outer_headers.dmac_47_16);
807 dmac[0] = 0x01;
808 MLX5_SET_TO_ONES(fte_match_param, g[7].match_criteria,
809 outer_headers.ethertype);
810
811 g[8].log_sz = 0;
812 g[8].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
813 dmac = MLX5_ADDR_OF(fte_match_param, g[8].match_criteria,
814 outer_headers.dmac_47_16);
815 dmac[0] = 0x01;
816 priv->ft.main = mlx5_create_flow_table(priv->mdev, 1,
817 MLX5_FLOW_TABLE_TYPE_NIC_RCV,
818 9, g);
819 kfree(g);
820
821 return priv->ft.main ? 0 : -ENOMEM;
822 }
823
/* Destroy the main RX flow table created by
 * mlx5e_create_main_flow_table().
 */
static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
{
	mlx5_destroy_flow_table(priv->ft.main);
}
828
829 static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
830 {
831 struct mlx5_flow_table_group *g;
832
833 g = kcalloc(2, sizeof(*g), GFP_KERNEL);
834 if (!g)
835 return -ENOMEM;
836
837 g[0].log_sz = 12;
838 g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
839 MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
840 outer_headers.vlan_tag);
841 MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
842 outer_headers.first_vid);
843
844 /* untagged + any vlan id */
845 g[1].log_sz = 1;
846 g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
847 MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
848 outer_headers.vlan_tag);
849
850 priv->ft.vlan = mlx5_create_flow_table(priv->mdev, 0,
851 MLX5_FLOW_TABLE_TYPE_NIC_RCV,
852 2, g);
853
854 kfree(g);
855 return priv->ft.vlan ? 0 : -ENOMEM;
856 }
857
/* Destroy the VLAN RX flow table created by
 * mlx5e_create_vlan_flow_table().
 */
static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
{
	mlx5_destroy_flow_table(priv->ft.vlan);
}
862
863 int mlx5e_create_flow_tables(struct mlx5e_priv *priv)
864 {
865 int err;
866
867 err = mlx5e_create_main_flow_table(priv);
868 if (err)
869 return err;
870
871 err = mlx5e_create_vlan_flow_table(priv);
872 if (err)
873 goto err_destroy_main_flow_table;
874
875 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
876 if (err)
877 goto err_destroy_vlan_flow_table;
878
879 return 0;
880
881 err_destroy_vlan_flow_table:
882 mlx5e_destroy_vlan_flow_table(priv);
883
884 err_destroy_main_flow_table:
885 mlx5e_destroy_main_flow_table(priv);
886
887 return err;
888 }
889
/* Tear down everything mlx5e_create_flow_tables() set up, in reverse
 * order: the untagged rule, then the VLAN table, then the main table.
 */
void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
	mlx5e_destroy_vlan_flow_table(priv);
	mlx5e_destroy_main_flow_table(priv);
}