drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
(mirror_ubuntu-bionic-kernel, at commit: net/mlx5e: Remove redundant vport context vlan update)
/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
#include "en.h"
#include "lib/mpfs.h"

static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
                                  struct mlx5e_l2_rule *ai, int type);
static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
                                   struct mlx5e_l2_rule *ai);

enum {
        MLX5E_FULLMATCH = 0,
        MLX5E_ALLMULTI = 1,
        MLX5E_PROMISC = 2,
};

enum {
        MLX5E_UC = 0,
        MLX5E_MC_IPV4 = 1,
        MLX5E_MC_IPV6 = 2,
        MLX5E_MC_OTHER = 3,
};

enum {
        MLX5E_ACTION_NONE = 0,
        MLX5E_ACTION_ADD = 1,
        MLX5E_ACTION_DEL = 2,
};

struct mlx5e_l2_hash_node {
        struct hlist_node hlist;
        u8 action;
        struct mlx5e_l2_rule ai;
        bool mpfs;
};

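/* The netdev_uc/netdev_mc tables hash L2 addresses by the last byte of
 * the MAC, so mlx5e_hash_l2() always returns a bucket index below
 * MLX5E_L2_ADDR_HASH_SIZE.
 */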
static inline int mlx5e_hash_l2(u8 *addr)
{
        return addr[5];
}

static void mlx5e_add_l2_to_hash(struct hlist_head *hash, u8 *addr)
{
        struct mlx5e_l2_hash_node *hn;
        int ix = mlx5e_hash_l2(addr);
        int found = 0;

        hlist_for_each_entry(hn, &hash[ix], hlist)
                if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
                        found = 1;
                        break;
                }

        if (found) {
                hn->action = MLX5E_ACTION_NONE;
                return;
        }

        hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
        if (!hn)
                return;

        ether_addr_copy(hn->ai.addr, addr);
        hn->action = MLX5E_ACTION_ADD;

        hlist_add_head(&hn->hlist, &hash[ix]);
}

static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn)
{
        hlist_del(&hn->hlist);
        kfree(hn);
}

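/* Push the set of active C-tag VLANs into the NIC vport context, so the
 * device knows which VIDs this vport accepts. If more VLANs are active
 * than the log_max_vlan_list capability allows, the list is truncated
 * and a warning is emitted.
 */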
static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
{
        struct net_device *ndev = priv->netdev;
        int max_list_size;
        int list_size;
        u16 *vlans;
        int vlan;
        int err;
        int i;

        list_size = 0;
        for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID)
                list_size++;

        max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);

        if (list_size > max_list_size) {
                netdev_warn(ndev,
                            "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
                            list_size, max_list_size);
                list_size = max_list_size;
        }

        vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
        if (!vlans)
                return -ENOMEM;

        i = 0;
        for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
                if (i >= list_size)
                        break;
                vlans[i++] = vlan;
        }

        err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
        if (err)
                netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n",
                           err);

        kfree(vlans);
        return err;
}

enum mlx5e_vlan_rule_type {
        MLX5E_VLAN_RULE_TYPE_UNTAGGED,
        MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
        MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
        MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID,
        MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID,
};

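/* Install a single steering rule in the VLAN flow table. Every rule
 * type forwards matching packets to the L2 flow table; the types differ
 * only in match criteria: untagged, any C-tag, any S-tag, or an exact
 * C-tag/S-tag VID match.
 */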
static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
                                 enum mlx5e_vlan_rule_type rule_type,
                                 u16 vid, struct mlx5_flow_spec *spec)
{
        struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
        struct mlx5_flow_destination dest = {};
        struct mlx5_flow_handle **rule_p;
        MLX5_DECLARE_FLOW_ACT(flow_act);
        int err = 0;

        dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest.ft = priv->fs.l2.ft.t;

        spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

        switch (rule_type) {
        case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
                /* cvlan_tag enabled in match criteria and
                 * disabled in match value means both S & C tags
                 * don't exist (untagged of both)
                 */
                rule_p = &priv->fs.vlan.untagged_rule;
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                                 outer_headers.cvlan_tag);
                break;
        case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
                rule_p = &priv->fs.vlan.any_cvlan_rule;
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                                 outer_headers.cvlan_tag);
                MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
                break;
        case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
                rule_p = &priv->fs.vlan.any_svlan_rule;
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                                 outer_headers.svlan_tag);
                MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
                break;
        case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
                rule_p = &priv->fs.vlan.active_svlans_rule[vid];
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                                 outer_headers.svlan_tag);
                MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                                 outer_headers.first_vid);
                MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
                         vid);
                break;
        default: /* MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID */
                rule_p = &priv->fs.vlan.active_cvlans_rule[vid];
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                                 outer_headers.cvlan_tag);
                MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                                 outer_headers.first_vid);
                MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
                         vid);
                break;
        }

        *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);

        if (IS_ERR(*rule_p)) {
                err = PTR_ERR(*rule_p);
                *rule_p = NULL;
                netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
        }

        return err;
}

static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
                               enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
        struct mlx5_flow_spec *spec;
        int err = 0;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec)
                return -ENOMEM;

        if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID)
                mlx5e_vport_context_update_vlans(priv);

        err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec);

        kvfree(spec);

        return err;
}

static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
                                enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
        switch (rule_type) {
        case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
                if (priv->fs.vlan.untagged_rule) {
                        mlx5_del_flow_rules(priv->fs.vlan.untagged_rule);
                        priv->fs.vlan.untagged_rule = NULL;
                }
                break;
        case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
                if (priv->fs.vlan.any_cvlan_rule) {
                        mlx5_del_flow_rules(priv->fs.vlan.any_cvlan_rule);
                        priv->fs.vlan.any_cvlan_rule = NULL;
                }
                break;
        case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
                if (priv->fs.vlan.any_svlan_rule) {
                        mlx5_del_flow_rules(priv->fs.vlan.any_svlan_rule);
                        priv->fs.vlan.any_svlan_rule = NULL;
                }
                break;
        case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
                if (priv->fs.vlan.active_svlans_rule[vid]) {
                        mlx5_del_flow_rules(priv->fs.vlan.active_svlans_rule[vid]);
                        priv->fs.vlan.active_svlans_rule[vid] = NULL;
                }
                break;
        case MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID:
                if (priv->fs.vlan.active_cvlans_rule[vid]) {
                        mlx5_del_flow_rules(priv->fs.vlan.active_cvlans_rule[vid]);
                        priv->fs.vlan.active_cvlans_rule[vid] = NULL;
                }
                mlx5e_vport_context_update_vlans(priv);
                break;
        }
}

static void mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
{
        mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
        mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}

static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
{
        int err;

        err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
        if (err)
                return err;

        return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}

void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv)
{
        if (!priv->fs.vlan.cvlan_filter_disabled)
                return;

        priv->fs.vlan.cvlan_filter_disabled = false;
        if (priv->netdev->flags & IFF_PROMISC)
                return;
        mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}

void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv)
{
        if (priv->fs.vlan.cvlan_filter_disabled)
                return;

        priv->fs.vlan.cvlan_filter_disabled = true;
        if (priv->netdev->flags & IFF_PROMISC)
                return;
        mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}

static int mlx5e_vlan_rx_add_cvid(struct mlx5e_priv *priv, u16 vid)
{
        int err;

        set_bit(vid, priv->fs.vlan.active_cvlans);

        err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
        if (err)
                clear_bit(vid, priv->fs.vlan.active_cvlans);

        return err;
}

static int mlx5e_vlan_rx_add_svid(struct mlx5e_priv *priv, u16 vid)
{
        struct net_device *netdev = priv->netdev;
        int err;

        set_bit(vid, priv->fs.vlan.active_svlans);

        err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
        if (err) {
                clear_bit(vid, priv->fs.vlan.active_svlans);
                return err;
        }

        /* Need to fix some features.. */
        netdev_update_features(netdev);
        return err;
}

int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        if (be16_to_cpu(proto) == ETH_P_8021Q)
                return mlx5e_vlan_rx_add_cvid(priv, vid);
        else if (be16_to_cpu(proto) == ETH_P_8021AD)
                return mlx5e_vlan_rx_add_svid(priv, vid);

        return -EOPNOTSUPP;
}

int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        if (be16_to_cpu(proto) == ETH_P_8021Q) {
                clear_bit(vid, priv->fs.vlan.active_cvlans);
                mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
        } else if (be16_to_cpu(proto) == ETH_P_8021AD) {
                clear_bit(vid, priv->fs.vlan.active_svlans);
                mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
                netdev_update_features(dev);
        }

        return 0;
}

static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
{
        int i;

        mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

        for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
                mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
        }

        for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID)
                mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);

        if (priv->fs.vlan.cvlan_filter_disabled &&
            !(priv->netdev->flags & IFF_PROMISC))
                mlx5e_add_any_vid_rules(priv);
}

static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
{
        int i;

        mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

        for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
                mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
        }

        for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID)
                mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);

        if (priv->fs.vlan.cvlan_filter_disabled &&
            !(priv->netdev->flags & IFF_PROMISC))
                mlx5e_del_any_vid_rules(priv);
}

#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
        for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
                hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)

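/* Apply the pending add/del action recorded for one L2 address:
 * program or remove its flow rule, and for unicast addresses also
 * update the Multi-Physical Function Switch (MPFS, see lib/mpfs.h) so
 * the NIC delivers frames for that address to this function.
 */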
static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
                                    struct mlx5e_l2_hash_node *hn)
{
        u8 action = hn->action;
        u8 mac_addr[ETH_ALEN];
        int l2_err = 0;

        ether_addr_copy(mac_addr, hn->ai.addr);

        switch (action) {
        case MLX5E_ACTION_ADD:
                mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH);
                if (!is_multicast_ether_addr(mac_addr)) {
                        l2_err = mlx5_mpfs_add_mac(priv->mdev, mac_addr);
                        hn->mpfs = !l2_err;
                }
                hn->action = MLX5E_ACTION_NONE;
                break;

        case MLX5E_ACTION_DEL:
                if (!is_multicast_ether_addr(mac_addr) && hn->mpfs)
                        l2_err = mlx5_mpfs_del_mac(priv->mdev, mac_addr);
                mlx5e_del_l2_flow_rule(priv, &hn->ai);
                mlx5e_del_l2_from_hash(hn);
                break;
        }

        if (l2_err)
                netdev_warn(priv->netdev, "MPFS, failed to %s mac %pM, err(%d)\n",
                            action == MLX5E_ACTION_ADD ? "add" : "del", mac_addr, l2_err);
}

static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
{
        struct net_device *netdev = priv->netdev;
        struct netdev_hw_addr *ha;

        netif_addr_lock_bh(netdev);

        mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc,
                             priv->netdev->dev_addr);

        netdev_for_each_uc_addr(ha, netdev)
                mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr);

        netdev_for_each_mc_addr(ha, netdev)
                mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr);

        netif_addr_unlock_bh(netdev);
}

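/* Flatten a UC/MC hash table into the contiguous array expected by the
 * vport context command: our own address (UC) or the broadcast address
 * (MC, when enabled) goes first, and the primary address is skipped
 * while walking the hash so it is not written twice.
 */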
static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
                                  u8 addr_array[][ETH_ALEN], int size)
{
        bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
        struct net_device *ndev = priv->netdev;
        struct mlx5e_l2_hash_node *hn;
        struct hlist_head *addr_list;
        struct hlist_node *tmp;
        int i = 0;
        int hi;

        addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;

        if (is_uc) /* Make sure our own address is pushed first */
                ether_addr_copy(addr_array[i++], ndev->dev_addr);
        else if (priv->fs.l2.broadcast_enabled)
                ether_addr_copy(addr_array[i++], ndev->broadcast);

        mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
                if (ether_addr_equal(ndev->dev_addr, hn->ai.addr))
                        continue;
                if (i >= size)
                        break;
                ether_addr_copy(addr_array[i++], hn->ai.addr);
        }
}

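/* Sync one address list (UC or MC) into the NIC vport context,
 * truncating it to the device's log_max_current_{uc,mc}_list
 * capability if needed.
 */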
static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
                                                 int list_type)
{
        bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
        struct mlx5e_l2_hash_node *hn;
        u8 (*addr_array)[ETH_ALEN] = NULL;
        struct hlist_head *addr_list;
        struct hlist_node *tmp;
        int max_size;
        int size;
        int err;
        int hi;

        size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0);
        max_size = is_uc ?
                1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
                1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);

        addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
        mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
                size++;

        if (size > max_size) {
                netdev_warn(priv->netdev,
                            "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
                            is_uc ? "UC" : "MC", size, max_size);
                size = max_size;
        }

        if (size) {
                addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
                if (!addr_array) {
                        err = -ENOMEM;
                        goto out;
                }
                mlx5e_fill_addr_array(priv, list_type, addr_array, size);
        }

        err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
out:
        if (err)
                netdev_err(priv->netdev,
                           "Failed to modify vport %s list err(%d)\n",
                           is_uc ? "UC" : "MC", err);
        kfree(addr_array);
}

static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
{
        struct mlx5e_l2_table *ea = &priv->fs.l2;

        mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
        mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
        mlx5_modify_nic_vport_promisc(priv->mdev, 0,
                                      ea->allmulti_enabled,
                                      ea->promisc_enabled);
}

static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
{
        struct mlx5e_l2_hash_node *hn;
        struct hlist_node *tmp;
        int i;

        mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
                mlx5e_execute_l2_action(priv, hn);

        mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
                mlx5e_execute_l2_action(priv, hn);
}

static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
{
        struct mlx5e_l2_hash_node *hn;
        struct hlist_node *tmp;
        int i;

        mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
                hn->action = MLX5E_ACTION_DEL;
        mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
                hn->action = MLX5E_ACTION_DEL;

        if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
                mlx5e_sync_netdev_addr(priv);

        mlx5e_apply_netdev_addr(priv);
}

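/* Deferred rx-mode work, scheduled whenever the netdev rx mode or
 * address lists may have changed: compute the delta between the
 * requested promisc/allmulti/broadcast state and what is currently
 * programmed, add new rules before deleting stale ones, then mirror
 * the result into the NIC vport context.
 */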
void mlx5e_set_rx_mode_work(struct work_struct *work)
{
        struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
                                               set_rx_mode_work);

        struct mlx5e_l2_table *ea = &priv->fs.l2;
        struct net_device *ndev = priv->netdev;

        bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
        bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC);
        bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
        bool broadcast_enabled = rx_mode_enable;

        bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
        bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
        bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
        bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
        bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
        bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;

        if (enable_promisc) {
                if (!priv->channels.params.vlan_strip_disable)
                        netdev_warn_once(ndev,
                                         "S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n");
                mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC);
                if (!priv->fs.vlan.cvlan_filter_disabled)
                        mlx5e_add_any_vid_rules(priv);
        }
        if (enable_allmulti)
                mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
        if (enable_broadcast)
                mlx5e_add_l2_flow_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

        mlx5e_handle_netdev_addr(priv);

        if (disable_broadcast)
                mlx5e_del_l2_flow_rule(priv, &ea->broadcast);
        if (disable_allmulti)
                mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
        if (disable_promisc) {
                if (!priv->fs.vlan.cvlan_filter_disabled)
                        mlx5e_del_any_vid_rules(priv);
                mlx5e_del_l2_flow_rule(priv, &ea->promisc);
        }

        ea->promisc_enabled = promisc_enabled;
        ea->allmulti_enabled = allmulti_enabled;
        ea->broadcast_enabled = broadcast_enabled;

        mlx5e_vport_context_update(priv);
}

static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
{
        int i;

        for (i = ft->num_groups - 1; i >= 0; i--) {
                if (!IS_ERR_OR_NULL(ft->g[i]))
                        mlx5_destroy_flow_group(ft->g[i]);
                ft->g[i] = NULL;
        }
        ft->num_groups = 0;
}

void mlx5e_init_l2_addr(struct mlx5e_priv *priv)
{
        ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast);
}

void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
        mlx5e_destroy_groups(ft);
        kfree(ft->g);
        mlx5_destroy_flow_table(ft->t);
        ft->t = NULL;
}

static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc)
{
        int i;

        for (i = 0; i < MLX5E_NUM_TT; i++) {
                if (!IS_ERR_OR_NULL(ttc->rules[i])) {
                        mlx5_del_flow_rules(ttc->rules[i]);
                        ttc->rules[i] = NULL;
                }
        }

        for (i = 0; i < MLX5E_NUM_TUNNEL_TT; i++) {
                if (!IS_ERR_OR_NULL(ttc->tunnel_rules[i])) {
                        mlx5_del_flow_rules(ttc->tunnel_rules[i]);
                        ttc->tunnel_rules[i] = NULL;
                }
        }
}

struct mlx5e_etype_proto {
        u16 etype;
        u8 proto;
};

static struct mlx5e_etype_proto ttc_rules[] = {
        [MLX5E_TT_IPV4_TCP] = {
                .etype = ETH_P_IP,
                .proto = IPPROTO_TCP,
        },
        [MLX5E_TT_IPV6_TCP] = {
                .etype = ETH_P_IPV6,
                .proto = IPPROTO_TCP,
        },
        [MLX5E_TT_IPV4_UDP] = {
                .etype = ETH_P_IP,
                .proto = IPPROTO_UDP,
        },
        [MLX5E_TT_IPV6_UDP] = {
                .etype = ETH_P_IPV6,
                .proto = IPPROTO_UDP,
        },
        [MLX5E_TT_IPV4_IPSEC_AH] = {
                .etype = ETH_P_IP,
                .proto = IPPROTO_AH,
        },
        [MLX5E_TT_IPV6_IPSEC_AH] = {
                .etype = ETH_P_IPV6,
                .proto = IPPROTO_AH,
        },
        [MLX5E_TT_IPV4_IPSEC_ESP] = {
                .etype = ETH_P_IP,
                .proto = IPPROTO_ESP,
        },
        [MLX5E_TT_IPV6_IPSEC_ESP] = {
                .etype = ETH_P_IPV6,
                .proto = IPPROTO_ESP,
        },
        [MLX5E_TT_IPV4] = {
                .etype = ETH_P_IP,
                .proto = 0,
        },
        [MLX5E_TT_IPV6] = {
                .etype = ETH_P_IPV6,
                .proto = 0,
        },
        [MLX5E_TT_ANY] = {
                .etype = 0,
                .proto = 0,
        },
};

static struct mlx5e_etype_proto ttc_tunnel_rules[] = {
        [MLX5E_TT_IPV4_GRE] = {
                .etype = ETH_P_IP,
                .proto = IPPROTO_GRE,
        },
        [MLX5E_TT_IPV6_GRE] = {
                .etype = ETH_P_IPV6,
                .proto = IPPROTO_GRE,
        },
};

static u8 mlx5e_etype_to_ipv(u16 ethertype)
{
        if (ethertype == ETH_P_IP)
                return 4;

        if (ethertype == ETH_P_IPV6)
                return 6;

        return 0;
}

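/* Build one traffic-type classification (TTC) rule: match on the outer
 * IP version (or on ethertype, when outer ip_version matching is not
 * supported) and/or the IP protocol, and steer hits to the given
 * destination (a TIR, or the inner TTC table for tunneled traffic).
 */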
static struct mlx5_flow_handle *
mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
                        struct mlx5_flow_table *ft,
                        struct mlx5_flow_destination *dest,
                        u16 etype,
                        u8 proto)
{
        int match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version);
        MLX5_DECLARE_FLOW_ACT(flow_act);
        struct mlx5_flow_handle *rule;
        struct mlx5_flow_spec *spec;
        int err = 0;
        u8 ipv;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec)
                return ERR_PTR(-ENOMEM);

        if (proto) {
                spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
                MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, proto);
        }

        ipv = mlx5e_etype_to_ipv(etype);
        if (match_ipv_outer && ipv) {
                spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
                MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, ipv);
        } else if (etype) {
                spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
                MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
        }

        rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
        if (IS_ERR(rule)) {
                err = PTR_ERR(rule);
                netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
        }

        kvfree(spec);
        return err ? ERR_PTR(err) : rule;
}

static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
{
        struct mlx5_flow_destination dest = {};
        struct mlx5e_ttc_table *ttc;
        struct mlx5_flow_handle **rules;
        struct mlx5_flow_table *ft;
        int tt;
        int err;

        ttc = &priv->fs.ttc;
        ft = ttc->ft.t;
        rules = ttc->rules;

        dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
        for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
                if (tt == MLX5E_TT_ANY)
                        dest.tir_num = priv->direct_tir[0].tirn;
                else
                        dest.tir_num = priv->indir_tir[tt].tirn;
                rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
                                                    ttc_rules[tt].etype,
                                                    ttc_rules[tt].proto);
                if (IS_ERR(rules[tt]))
                        goto del_rules;
        }

        if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
                return 0;

        rules = ttc->tunnel_rules;
        dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest.ft = priv->fs.inner_ttc.ft.t;
        for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) {
                rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
                                                    ttc_tunnel_rules[tt].etype,
                                                    ttc_tunnel_rules[tt].proto);
                if (IS_ERR(rules[tt]))
                        goto del_rules;
        }

        return 0;

del_rules:
        err = PTR_ERR(rules[tt]);
        rules[tt] = NULL;
        mlx5e_cleanup_ttc_rules(ttc);
        return err;
}

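/* TTC table layout: group 1 matches on L4 (ip_protocol plus ip_version
 * or ethertype) and is sized to also hold the tunnel rules, group 2
 * matches on L3 only, and group 3 holds the MLX5E_TT_ANY catch-all.
 */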
#define MLX5E_TTC_NUM_GROUPS    3
#define MLX5E_TTC_GROUP1_SIZE   (BIT(3) + MLX5E_NUM_TUNNEL_TT)
#define MLX5E_TTC_GROUP2_SIZE   BIT(1)
#define MLX5E_TTC_GROUP3_SIZE   BIT(0)
#define MLX5E_TTC_TABLE_SIZE    (MLX5E_TTC_GROUP1_SIZE +\
                                 MLX5E_TTC_GROUP2_SIZE +\
                                 MLX5E_TTC_GROUP3_SIZE)

#define MLX5E_INNER_TTC_NUM_GROUPS      3
#define MLX5E_INNER_TTC_GROUP1_SIZE     BIT(3)
#define MLX5E_INNER_TTC_GROUP2_SIZE     BIT(1)
#define MLX5E_INNER_TTC_GROUP3_SIZE     BIT(0)
#define MLX5E_INNER_TTC_TABLE_SIZE      (MLX5E_INNER_TTC_GROUP1_SIZE +\
                                         MLX5E_INNER_TTC_GROUP2_SIZE +\
                                         MLX5E_INNER_TTC_GROUP3_SIZE)

static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc,
                                         bool use_ipv)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5e_flow_table *ft = &ttc->ft;
        int ix = 0;
        u32 *in;
        int err;
        u8 *mc;

        ft->g = kcalloc(MLX5E_TTC_NUM_GROUPS,
                        sizeof(*ft->g), GFP_KERNEL);
        if (!ft->g)
                return -ENOMEM;
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in) {
                kfree(ft->g);
                return -ENOMEM;
        }

        /* L4 Group */
        mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
        if (use_ipv)
                MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_version);
        else
                MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_TTC_GROUP1_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err;
        ft->num_groups++;

        /* L3 Group */
        MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_TTC_GROUP2_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err;
        ft->num_groups++;

        /* Any Group */
        memset(in, 0, inlen);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_TTC_GROUP3_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err;
        ft->num_groups++;

        kvfree(in);
        return 0;

err:
        err = PTR_ERR(ft->g[ft->num_groups]);
        ft->g[ft->num_groups] = NULL;
        kvfree(in);

        return err;
}

static struct mlx5_flow_handle *
mlx5e_generate_inner_ttc_rule(struct mlx5e_priv *priv,
                              struct mlx5_flow_table *ft,
                              struct mlx5_flow_destination *dest,
                              u16 etype, u8 proto)
{
        MLX5_DECLARE_FLOW_ACT(flow_act);
        struct mlx5_flow_handle *rule;
        struct mlx5_flow_spec *spec;
        int err = 0;
        u8 ipv;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec)
                return ERR_PTR(-ENOMEM);

        ipv = mlx5e_etype_to_ipv(etype);
        if (etype && ipv) {
                spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_version);
                MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_version, ipv);
        }

        if (proto) {
                spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_protocol);
                MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_protocol, proto);
        }

        rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
        if (IS_ERR(rule)) {
                err = PTR_ERR(rule);
                netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
        }

        kvfree(spec);
        return err ? ERR_PTR(err) : rule;
}

static int mlx5e_generate_inner_ttc_table_rules(struct mlx5e_priv *priv)
{
        struct mlx5_flow_destination dest = {};
        struct mlx5_flow_handle **rules;
        struct mlx5e_ttc_table *ttc;
        struct mlx5_flow_table *ft;
        int err;
        int tt;

        ttc = &priv->fs.inner_ttc;
        ft = ttc->ft.t;
        rules = ttc->rules;

        dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
        for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
                if (tt == MLX5E_TT_ANY)
                        dest.tir_num = priv->direct_tir[0].tirn;
                else
                        dest.tir_num = priv->inner_indir_tir[tt].tirn;

                rules[tt] = mlx5e_generate_inner_ttc_rule(priv, ft, &dest,
                                                          ttc_rules[tt].etype,
                                                          ttc_rules[tt].proto);
                if (IS_ERR(rules[tt]))
                        goto del_rules;
        }

        return 0;

del_rules:
        err = PTR_ERR(rules[tt]);
        rules[tt] = NULL;
        mlx5e_cleanup_ttc_rules(ttc);
        return err;
}

static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5e_flow_table *ft = &ttc->ft;
        int ix = 0;
        u32 *in;
        int err;
        u8 *mc;

        ft->g = kcalloc(MLX5E_INNER_TTC_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
        if (!ft->g)
                return -ENOMEM;
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in) {
                kfree(ft->g);
                return -ENOMEM;
        }

        /* L4 Group */
        mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
        MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
        MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_version);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_INNER_TTC_GROUP1_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err;
        ft->num_groups++;

        /* L3 Group */
        MLX5_SET(fte_match_param, mc, inner_headers.ip_protocol, 0);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_INNER_TTC_GROUP2_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err;
        ft->num_groups++;

        /* Any Group */
        memset(in, 0, inlen);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_INNER_TTC_GROUP3_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err;
        ft->num_groups++;

        kvfree(in);
        return 0;

err:
        err = PTR_ERR(ft->g[ft->num_groups]);
        ft->g[ft->num_groups] = NULL;
        kvfree(in);

        return err;
}

int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
{
        struct mlx5e_ttc_table *ttc = &priv->fs.inner_ttc;
        struct mlx5_flow_table_attr ft_attr = {};
        struct mlx5e_flow_table *ft = &ttc->ft;
        int err;

        if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
                return 0;

        ft_attr.max_fte = MLX5E_INNER_TTC_TABLE_SIZE;
        ft_attr.level = MLX5E_INNER_TTC_FT_LEVEL;
        ft_attr.prio = MLX5E_NIC_PRIO;

        ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
        if (IS_ERR(ft->t)) {
                err = PTR_ERR(ft->t);
                ft->t = NULL;
                return err;
        }

        err = mlx5e_create_inner_ttc_table_groups(ttc);
        if (err)
                goto err;

        err = mlx5e_generate_inner_ttc_table_rules(priv);
        if (err)
                goto err;

        return 0;

err:
        mlx5e_destroy_flow_table(ft);
        return err;
}

void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
{
        struct mlx5e_ttc_table *ttc = &priv->fs.inner_ttc;

        if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
                return;

        mlx5e_cleanup_ttc_rules(ttc);
        mlx5e_destroy_flow_table(&ttc->ft);
}

void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
{
        struct mlx5e_ttc_table *ttc = &priv->fs.ttc;

        mlx5e_cleanup_ttc_rules(ttc);
        mlx5e_destroy_flow_table(&ttc->ft);
}

int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
{
        bool match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version);
        struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
        struct mlx5_flow_table_attr ft_attr = {};
        struct mlx5e_flow_table *ft = &ttc->ft;
        int err;

        ft_attr.max_fte = MLX5E_TTC_TABLE_SIZE;
        ft_attr.level = MLX5E_TTC_FT_LEVEL;
        ft_attr.prio = MLX5E_NIC_PRIO;

        ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
        if (IS_ERR(ft->t)) {
                err = PTR_ERR(ft->t);
                ft->t = NULL;
                return err;
        }

        err = mlx5e_create_ttc_table_groups(ttc, match_ipv_outer);
        if (err)
                goto err;

        err = mlx5e_generate_ttc_table_rules(priv);
        if (err)
                goto err;

        return 0;
err:
        mlx5e_destroy_flow_table(ft);
        return err;
}

static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
                                   struct mlx5e_l2_rule *ai)
{
        if (!IS_ERR_OR_NULL(ai->rule)) {
                mlx5_del_flow_rules(ai->rule);
                ai->rule = NULL;
        }
}

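/* Install one L2 (DMAC) steering rule: a full 48-bit DMAC match, the
 * allmulti catch-all (matching only the multicast bit of the first
 * DMAC byte), or the promiscuous rule (no match at all). All hits
 * continue to the TTC flow table.
 */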
static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
                                  struct mlx5e_l2_rule *ai, int type)
{
        struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
        struct mlx5_flow_destination dest = {};
        MLX5_DECLARE_FLOW_ACT(flow_act);
        struct mlx5_flow_spec *spec;
        int err = 0;
        u8 *mc_dmac;
        u8 *mv_dmac;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec)
                return -ENOMEM;

        mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                               outer_headers.dmac_47_16);
        mv_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                               outer_headers.dmac_47_16);

        dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest.ft = priv->fs.ttc.ft.t;

        switch (type) {
        case MLX5E_FULLMATCH:
                spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
                eth_broadcast_addr(mc_dmac);
                ether_addr_copy(mv_dmac, ai->addr);
                break;

        case MLX5E_ALLMULTI:
                spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
                mc_dmac[0] = 0x01;
                mv_dmac[0] = 0x01;
                break;

        case MLX5E_PROMISC:
                break;
        }

        ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
        if (IS_ERR(ai->rule)) {
                netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
                           __func__, mv_dmac);
                err = PTR_ERR(ai->rule);
                ai->rule = NULL;
        }

        kvfree(spec);

        return err;
}

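/* L2 table layout: one promiscuous catch-all entry, BIT(15) full-match
 * DMAC entries, and one allmulti entry.
 */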
#define MLX5E_NUM_L2_GROUPS     3
#define MLX5E_L2_GROUP1_SIZE    BIT(0)
#define MLX5E_L2_GROUP2_SIZE    BIT(15)
#define MLX5E_L2_GROUP3_SIZE    BIT(0)
#define MLX5E_L2_TABLE_SIZE     (MLX5E_L2_GROUP1_SIZE +\
                                 MLX5E_L2_GROUP2_SIZE +\
                                 MLX5E_L2_GROUP3_SIZE)
static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5e_flow_table *ft = &l2_table->ft;
        int ix = 0;
        u8 *mc_dmac;
        u32 *in;
        int err;
        u8 *mc;

        ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL);
        if (!ft->g)
                return -ENOMEM;
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in) {
                kfree(ft->g);
                return -ENOMEM;
        }

        mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
        mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
                               outer_headers.dmac_47_16);
        /* Flow Group for promiscuous */
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_L2_GROUP1_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        /* Flow Group for full match */
        eth_broadcast_addr(mc_dmac);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_L2_GROUP2_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        /* Flow Group for allmulti */
        eth_zero_addr(mc_dmac);
        mc_dmac[0] = 0x01;
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_L2_GROUP3_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        kvfree(in);
        return 0;

err_destroy_groups:
        err = PTR_ERR(ft->g[ft->num_groups]);
        ft->g[ft->num_groups] = NULL;
        mlx5e_destroy_groups(ft);
        kvfree(in);

        return err;
}

static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
{
        mlx5e_destroy_flow_table(&priv->fs.l2.ft);
}

static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
{
        struct mlx5e_l2_table *l2_table = &priv->fs.l2;
        struct mlx5e_flow_table *ft = &l2_table->ft;
        struct mlx5_flow_table_attr ft_attr = {};
        int err;

        ft->num_groups = 0;

        ft_attr.max_fte = MLX5E_L2_TABLE_SIZE;
        ft_attr.level = MLX5E_L2_FT_LEVEL;
        ft_attr.prio = MLX5E_NIC_PRIO;

        ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
        if (IS_ERR(ft->t)) {
                err = PTR_ERR(ft->t);
                ft->t = NULL;
                return err;
        }

        err = mlx5e_create_l2_table_groups(l2_table);
        if (err)
                goto err_destroy_flow_table;

        return 0;

err_destroy_flow_table:
        mlx5_destroy_flow_table(ft->t);
        ft->t = NULL;

        return err;
}

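/* VLAN table layout: group 0 (C-tag + VID match) and group 1 (S-tag +
 * VID match) each hold up to 4096 per-VID rules; group 2 (C-tag
 * presence only) holds the any-C-tag and untagged rules; group 3
 * (S-tag presence only) holds the any-S-tag rule. Rules land in the
 * group whose match criteria they use.
 */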
#define MLX5E_NUM_VLAN_GROUPS   4
#define MLX5E_VLAN_GROUP0_SIZE  BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE  BIT(12)
#define MLX5E_VLAN_GROUP2_SIZE  BIT(1)
#define MLX5E_VLAN_GROUP3_SIZE  BIT(0)
#define MLX5E_VLAN_TABLE_SIZE   (MLX5E_VLAN_GROUP0_SIZE +\
                                 MLX5E_VLAN_GROUP1_SIZE +\
                                 MLX5E_VLAN_GROUP2_SIZE +\
                                 MLX5E_VLAN_GROUP3_SIZE)

static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
                                            int inlen)
{
        int err;
        int ix = 0;
        u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

        memset(in, 0, inlen);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_VLAN_GROUP0_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        memset(in, 0, inlen);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_VLAN_GROUP1_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        memset(in, 0, inlen);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_VLAN_GROUP2_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        memset(in, 0, inlen);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_VLAN_GROUP3_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        return 0;

err_destroy_groups:
        err = PTR_ERR(ft->g[ft->num_groups]);
        ft->g[ft->num_groups] = NULL;
        mlx5e_destroy_groups(ft);

        return err;
}

static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
{
        u32 *in;
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        int err;

        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        err = __mlx5e_create_vlan_table_groups(ft, in, inlen);

        kvfree(in);
        return err;
}

static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
{
        struct mlx5e_flow_table *ft = &priv->fs.vlan.ft;
        struct mlx5_flow_table_attr ft_attr = {};
        int err;

        ft->num_groups = 0;

        ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE;
        ft_attr.level = MLX5E_VLAN_FT_LEVEL;
        ft_attr.prio = MLX5E_NIC_PRIO;

        ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);

        if (IS_ERR(ft->t)) {
                err = PTR_ERR(ft->t);
                ft->t = NULL;
                return err;
        }
        ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
        if (!ft->g) {
                err = -ENOMEM;
                goto err_destroy_vlan_table;
        }

        err = mlx5e_create_vlan_table_groups(ft);
        if (err)
                goto err_free_g;

        mlx5e_add_vlan_rules(priv);

        return 0;

err_free_g:
        kfree(ft->g);
err_destroy_vlan_table:
        mlx5_destroy_flow_table(ft->t);
        ft->t = NULL;

        return err;
}

static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
{
        mlx5e_del_vlan_rules(priv);
        mlx5e_destroy_flow_table(&priv->fs.vlan.ft);
}

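/* Build the RX steering pipeline in the kernel flow namespace. Tables
 * are created in reverse packet order (aRFS, inner TTC, TTC, L2, VLAN)
 * so that each table's destination already exists when its rules are
 * added. An aRFS failure is not fatal; it only clears NETIF_F_NTUPLE.
 */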
int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
{
        int err;

        priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
                                              MLX5_FLOW_NAMESPACE_KERNEL);

        if (!priv->fs.ns)
                return -EOPNOTSUPP;

        err = mlx5e_arfs_create_tables(priv);
        if (err) {
                netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
                           err);
                priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
        }

        err = mlx5e_create_inner_ttc_table(priv);
        if (err) {
                netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
                           err);
                goto err_destroy_arfs_tables;
        }

        err = mlx5e_create_ttc_table(priv);
        if (err) {
                netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
                           err);
                goto err_destroy_inner_ttc_table;
        }

        err = mlx5e_create_l2_table(priv);
        if (err) {
                netdev_err(priv->netdev, "Failed to create l2 table, err=%d\n",
                           err);
                goto err_destroy_ttc_table;
        }

        err = mlx5e_create_vlan_table(priv);
        if (err) {
                netdev_err(priv->netdev, "Failed to create vlan table, err=%d\n",
                           err);
                goto err_destroy_l2_table;
        }

        mlx5e_ethtool_init_steering(priv);

        return 0;

err_destroy_l2_table:
        mlx5e_destroy_l2_table(priv);
err_destroy_ttc_table:
        mlx5e_destroy_ttc_table(priv);
err_destroy_inner_ttc_table:
        mlx5e_destroy_inner_ttc_table(priv);
err_destroy_arfs_tables:
        mlx5e_arfs_destroy_tables(priv);

        return err;
}

void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
{
        mlx5e_destroy_vlan_table(priv);
        mlx5e_destroy_l2_table(priv);
        mlx5e_destroy_ttc_table(priv);
        mlx5e_destroy_inner_ttc_table(priv);
        mlx5e_arfs_destroy_tables(priv);
        mlx5e_ethtool_cleanup_steering(priv);
}