/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
#include "en.h"
#include "lib/mpfs.h"

static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
				  struct mlx5e_l2_rule *ai, int type);
static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
				   struct mlx5e_l2_rule *ai);

enum {
	MLX5E_FULLMATCH = 0,
	MLX5E_ALLMULTI  = 1,
	MLX5E_PROMISC   = 2,
};

enum {
	MLX5E_UC       = 0,
	MLX5E_MC_IPV4  = 1,
	MLX5E_MC_IPV6  = 2,
	MLX5E_MC_OTHER = 3,
};

enum {
	MLX5E_ACTION_NONE = 0,
	MLX5E_ACTION_ADD  = 1,
	MLX5E_ACTION_DEL  = 2,
};

struct mlx5e_l2_hash_node {
	struct hlist_node hlist;
	u8 action;
	struct mlx5e_l2_rule ai;
	bool   mpfs;
};

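/* Hash an L2 address into the UC/MC hash tables by its last byte; this
 * assumes the tables have at least 256 buckets (MLX5E_L2_ADDR_HASH_SIZE,
 * defined in en.h).
 */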
static inline int mlx5e_hash_l2(u8 *addr)
{
	return addr[5];
}

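/* Queue an address for addition: if it is already hashed, clear any
 * pending action; otherwise allocate a new node (atomically, since the
 * caller holds the netdev address lock) and mark it MLX5E_ACTION_ADD.
 */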
static void mlx5e_add_l2_to_hash(struct hlist_head *hash, u8 *addr)
{
	struct mlx5e_l2_hash_node *hn;
	int ix = mlx5e_hash_l2(addr);
	int found = 0;

	hlist_for_each_entry(hn, &hash[ix], hlist)
		if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
			found = 1;
			break;
		}

	if (found) {
		hn->action = MLX5E_ACTION_NONE;
		return;
	}

	hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
	if (!hn)
		return;

	ether_addr_copy(hn->ai.addr, addr);
	hn->action = MLX5E_ACTION_ADD;

	hlist_add_head(&hn->hlist, &hash[ix]);
}

static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn)
{
	hlist_del(&hn->hlist);
	kfree(hn);
}

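/* Push the set of active VLANs into the NIC vport context so the device
 * (e.g. when an eswitch manager filters per-vport traffic) knows which
 * VIDs this vport uses; the list is capped at the device limit and
 * excess VIDs are dropped with a warning.
 */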
static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
{
	struct net_device *ndev = priv->netdev;
	int max_list_size;
	int list_size;
	u16 *vlans;
	int vlan;
	int err;
	int i;

	list_size = 0;
	for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID)
		list_size++;

	max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);

	if (list_size > max_list_size) {
		netdev_warn(ndev,
			    "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
			    list_size, max_list_size);
		list_size = max_list_size;
	}

	vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
	if (!vlans)
		return -ENOMEM;

	i = 0;
	for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID) {
		if (i >= list_size)
			break;
		vlans[i++] = vlan;
	}

	err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
	if (err)
		netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n",
			   err);

	kfree(vlans);
	return err;
}

enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,
	MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
	MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
	MLX5E_VLAN_RULE_TYPE_MATCH_VID,
};

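/* Install one steering rule in the VLAN flow table. All rule types
 * forward matching packets to the L2 table; they differ only in the
 * match criteria: untagged (no C-tag), any C-tag, any S-tag, or an
 * exact C-tag VID match.
 */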
static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
				 enum mlx5e_vlan_rule_type rule_type,
				 u16 vid, struct mlx5_flow_spec *spec)
{
	struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle **rule_p;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fs.l2.ft.t;

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		rule_p = &priv->fs.vlan.untagged_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		rule_p = &priv->fs.vlan.any_cvlan_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		rule_p = &priv->fs.vlan.any_svlan_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.svlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
		break;
	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
		rule_p = &priv->fs.vlan.active_vlans_rule[vid];
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
			 vid);
		break;
	}

	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);

	if (IS_ERR(*rule_p)) {
		err = PTR_ERR(*rule_p);
		*rule_p = NULL;
		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
	}

	return err;
}

static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
			       enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_VID)
		mlx5e_vport_context_update_vlans(priv);

	err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec);

	kvfree(spec);

	return err;
}

static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
				enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		if (priv->fs.vlan.untagged_rule) {
			mlx5_del_flow_rules(priv->fs.vlan.untagged_rule);
			priv->fs.vlan.untagged_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		if (priv->fs.vlan.any_cvlan_rule) {
			mlx5_del_flow_rules(priv->fs.vlan.any_cvlan_rule);
			priv->fs.vlan.any_cvlan_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		if (priv->fs.vlan.any_svlan_rule) {
			mlx5_del_flow_rules(priv->fs.vlan.any_svlan_rule);
			priv->fs.vlan.any_svlan_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
		mlx5e_vport_context_update_vlans(priv);
		if (priv->fs.vlan.active_vlans_rule[vid]) {
			mlx5_del_flow_rules(priv->fs.vlan.active_vlans_rule[vid]);
			priv->fs.vlan.active_vlans_rule[vid] = NULL;
		}
		mlx5e_vport_context_update_vlans(priv);
		break;
	}
}

static void mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}

static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	if (err)
		return err;

	return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}

void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
{
	if (!priv->fs.vlan.filter_disabled)
		return;

	priv->fs.vlan.filter_disabled = false;
	if (priv->netdev->flags & IFF_PROMISC)
		return;
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}

void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
{
	if (priv->fs.vlan.filter_disabled)
		return;

	priv->fs.vlan.filter_disabled = true;
	if (priv->netdev->flags & IFF_PROMISC)
		return;
	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}

int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
			  u16 vid)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	set_bit(vid, priv->fs.vlan.active_vlans);

	return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
}

int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
			   u16 vid)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	clear_bit(vid, priv->fs.vlan.active_vlans);

	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);

	return 0;
}

static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
{
	int i;

	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(i, priv->fs.vlan.active_vlans, VLAN_N_VID) {
		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
	}

	if (priv->fs.vlan.filter_disabled &&
	    !(priv->netdev->flags & IFF_PROMISC))
		mlx5e_add_any_vid_rules(priv);
}

static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
{
	int i;

	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(i, priv->fs.vlan.active_vlans, VLAN_N_VID) {
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
	}

	if (priv->fs.vlan.filter_disabled &&
	    !(priv->netdev->flags & IFF_PROMISC))
		mlx5e_del_any_vid_rules(priv);
}

#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)

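/* Apply the pending action on one hash node: install/remove its L2
 * steering rule, and for unicast addresses mirror the change into the
 * device MPFS (multi physical function switch) L2 table.
 */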
static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
				    struct mlx5e_l2_hash_node *hn)
{
	u8 action = hn->action;
	int l2_err = 0;

	switch (action) {
	case MLX5E_ACTION_ADD:
		mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH);
		if (!is_multicast_ether_addr(hn->ai.addr)) {
			l2_err = mlx5_mpfs_add_mac(priv->mdev, hn->ai.addr);
			hn->mpfs = !l2_err;
		}
		hn->action = MLX5E_ACTION_NONE;
		break;

	case MLX5E_ACTION_DEL:
		if (!is_multicast_ether_addr(hn->ai.addr) && hn->mpfs)
			l2_err = mlx5_mpfs_del_mac(priv->mdev, hn->ai.addr);
		mlx5e_del_l2_flow_rule(priv, &hn->ai);
		mlx5e_del_l2_from_hash(hn);
		break;
	}

	if (l2_err)
		netdev_warn(priv->netdev, "MPFS, failed to %s mac %pM, err(%d)\n",
			    action == MLX5E_ACTION_ADD ? "add" : "del", hn->ai.addr, l2_err);
}

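/* Snapshot the netdev UC/MC address lists (plus our own dev_addr) into
 * the driver hash tables under the netdev address lock.
 */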
static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(netdev);

	mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc,
			     priv->netdev->dev_addr);

	netdev_for_each_uc_addr(ha, netdev)
		mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr);

	netdev_for_each_mc_addr(ha, netdev)
		mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr);

	netif_addr_unlock_bh(netdev);
}

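/* Flatten one of the driver hash tables into a plain MAC array for the
 * vport context command; our own address (UC) or the broadcast address
 * (MC, when enabled) is placed first.
 */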
static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
				  u8 addr_array[][ETH_ALEN], int size)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct net_device *ndev = priv->netdev;
	struct mlx5e_l2_hash_node *hn;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int i = 0;
	int hi;

	addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;

	if (is_uc) /* Make sure our own address is pushed first */
		ether_addr_copy(addr_array[i++], ndev->dev_addr);
	else if (priv->fs.l2.broadcast_enabled)
		ether_addr_copy(addr_array[i++], ndev->broadcast);

	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
		if (ether_addr_equal(ndev->dev_addr, hn->ai.addr))
			continue;
		if (i >= size)
			break;
		ether_addr_copy(addr_array[i++], hn->ai.addr);
	}
}

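/* Push the UC or MC address list into the NIC vport context, capped at
 * the device limit (log_max_current_{uc,mc}_list); excess addresses are
 * dropped with a warning.
 */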
static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
						 int list_type)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct mlx5e_l2_hash_node *hn;
	u8 (*addr_array)[ETH_ALEN] = NULL;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int max_size;
	int size;
	int err;
	int hi;

	size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0);
	max_size = is_uc ?
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);

	addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
		size++;

	if (size > max_size) {
		netdev_warn(priv->netdev,
			    "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
			    is_uc ? "UC" : "MC", size, max_size);
		size = max_size;
	}

	if (size) {
		addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
		if (!addr_array) {
			err = -ENOMEM;
			goto out;
		}
		mlx5e_fill_addr_array(priv, list_type, addr_array, size);
	}

	err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
out:
	if (err)
		netdev_err(priv->netdev,
			   "Failed to modify vport %s list err(%d)\n",
			   is_uc ? "UC" : "MC", err);
	kfree(addr_array);
}

static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_table *ea = &priv->fs.l2;

	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
	mlx5_modify_nic_vport_promisc(priv->mdev, 0,
				      ea->allmulti_enabled,
				      ea->promisc_enabled);
}

static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
		mlx5e_execute_l2_action(priv, hn);

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
		mlx5e_execute_l2_action(priv, hn);
}

static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
		hn->action = MLX5E_ACTION_DEL;
	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
		hn->action = MLX5E_ACTION_DEL;

	if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
		mlx5e_sync_netdev_addr(priv);

	mlx5e_apply_netdev_addr(priv);
}

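/* Deferred ndo_set_rx_mode handler: compute the desired promisc,
 * allmulti and broadcast state, add new steering rules before removing
 * stale ones, reconcile the address hash tables, and finally sync the
 * NIC vport context.
 */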
void mlx5e_set_rx_mode_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       set_rx_mode_work);

	struct mlx5e_l2_table *ea = &priv->fs.l2;
	struct net_device *ndev = priv->netdev;

	bool rx_mode_enable   = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
	bool promisc_enabled   = rx_mode_enable && (ndev->flags & IFF_PROMISC);
	bool allmulti_enabled  = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
	bool broadcast_enabled = rx_mode_enable;

	bool enable_promisc    = !ea->promisc_enabled   &&  promisc_enabled;
	bool disable_promisc   =  ea->promisc_enabled   && !promisc_enabled;
	bool enable_allmulti   = !ea->allmulti_enabled  &&  allmulti_enabled;
	bool disable_allmulti  =  ea->allmulti_enabled  && !allmulti_enabled;
	bool enable_broadcast  = !ea->broadcast_enabled &&  broadcast_enabled;
	bool disable_broadcast =  ea->broadcast_enabled && !broadcast_enabled;

	if (enable_promisc) {
		mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC);
		if (!priv->fs.vlan.filter_disabled)
			mlx5e_add_any_vid_rules(priv);
	}
	if (enable_allmulti)
		mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
	if (enable_broadcast)
		mlx5e_add_l2_flow_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

	mlx5e_handle_netdev_addr(priv);

	if (disable_broadcast)
		mlx5e_del_l2_flow_rule(priv, &ea->broadcast);
	if (disable_allmulti)
		mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
	if (disable_promisc) {
		if (!priv->fs.vlan.filter_disabled)
			mlx5e_del_any_vid_rules(priv);
		mlx5e_del_l2_flow_rule(priv, &ea->promisc);
	}

	ea->promisc_enabled   = promisc_enabled;
	ea->allmulti_enabled  = allmulti_enabled;
	ea->broadcast_enabled = broadcast_enabled;

	mlx5e_vport_context_update(priv);
}

static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
{
	int i;

	for (i = ft->num_groups - 1; i >= 0; i--) {
		if (!IS_ERR_OR_NULL(ft->g[i]))
			mlx5_destroy_flow_group(ft->g[i]);
		ft->g[i] = NULL;
	}
	ft->num_groups = 0;
}

void mlx5e_init_l2_addr(struct mlx5e_priv *priv)
{
	ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast);
}

void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
	mlx5e_destroy_groups(ft);
	kfree(ft->g);
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;
}

static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc)
{
	int i;

	for (i = 0; i < MLX5E_NUM_TT; i++) {
		if (!IS_ERR_OR_NULL(ttc->rules[i])) {
			mlx5_del_flow_rules(ttc->rules[i]);
			ttc->rules[i] = NULL;
		}
	}

	for (i = 0; i < MLX5E_NUM_TUNNEL_TT; i++) {
		if (!IS_ERR_OR_NULL(ttc->tunnel_rules[i])) {
			mlx5_del_flow_rules(ttc->tunnel_rules[i]);
			ttc->tunnel_rules[i] = NULL;
		}
	}
}

struct mlx5e_etype_proto {
	u16 etype;
	u8 proto;
};

static struct mlx5e_etype_proto ttc_rules[] = {
	[MLX5E_TT_IPV4_TCP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_TCP,
	},
	[MLX5E_TT_IPV6_TCP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_TCP,
	},
	[MLX5E_TT_IPV4_UDP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_UDP,
	},
	[MLX5E_TT_IPV6_UDP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_UDP,
	},
	[MLX5E_TT_IPV4_IPSEC_AH] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_AH,
	},
	[MLX5E_TT_IPV6_IPSEC_AH] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_AH,
	},
	[MLX5E_TT_IPV4_IPSEC_ESP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_ESP,
	},
	[MLX5E_TT_IPV6_IPSEC_ESP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_ESP,
	},
	[MLX5E_TT_IPV4] = {
		.etype = ETH_P_IP,
		.proto = 0,
	},
	[MLX5E_TT_IPV6] = {
		.etype = ETH_P_IPV6,
		.proto = 0,
	},
	[MLX5E_TT_ANY] = {
		.etype = 0,
		.proto = 0,
	},
};

static struct mlx5e_etype_proto ttc_tunnel_rules[] = {
	[MLX5E_TT_IPV4_GRE] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_GRE,
	},
	[MLX5E_TT_IPV6_GRE] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_GRE,
	},
};

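/* Map an ethertype to an IP version number (4/6), or 0 for non-IP. */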
static u8 mlx5e_etype_to_ipv(u16 ethertype)
{
	if (ethertype == ETH_P_IP)
		return 4;

	if (ethertype == ETH_P_IPV6)
		return 6;

	return 0;
}

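/* Build one TTC (traffic type classifier) rule matching the given
 * ethertype/IP protocol pair and steer it to @dest. When the device can
 * match on the outer ip_version field directly, prefer that over the
 * ethertype match.
 */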
static struct mlx5_flow_handle *
mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
			struct mlx5_flow_table *ft,
			struct mlx5_flow_destination *dest,
			u16 etype,
			u8 proto)
{
	int match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version);
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 ipv;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	if (proto) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, proto);
	}

	ipv = mlx5e_etype_to_ipv(etype);
	if (match_ipv_outer && ipv) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, ipv);
	} else if (etype) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
	}

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
	}

	kvfree(spec);
	return err ? ERR_PTR(err) : rule;
}

static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
{
	struct mlx5_flow_destination dest;
	struct mlx5e_ttc_table *ttc;
	struct mlx5_flow_handle **rules;
	struct mlx5_flow_table *ft;
	int tt;
	int err;

	ttc = &priv->fs.ttc;
	ft = ttc->ft.t;
	rules = ttc->rules;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
		if (tt == MLX5E_TT_ANY)
			dest.tir_num = priv->direct_tir[0].tirn;
		else
			dest.tir_num = priv->indir_tir[tt].tirn;
		rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
						    ttc_rules[tt].etype,
						    ttc_rules[tt].proto);
		if (IS_ERR(rules[tt]))
			goto del_rules;
	}

	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
		return 0;

	rules = ttc->tunnel_rules;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fs.inner_ttc.ft.t;
	for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) {
		rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
						    ttc_tunnel_rules[tt].etype,
						    ttc_tunnel_rules[tt].proto);
		if (IS_ERR(rules[tt]))
			goto del_rules;
	}

	return 0;

del_rules:
	err = PTR_ERR(rules[tt]);
	rules[tt] = NULL;
	mlx5e_cleanup_ttc_rules(ttc);
	return err;
}

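/* TTC flow table layout: one group for rules matching on the IP
 * protocol (the eight L4/IPsec rules plus the tunnel rules), one for
 * L3-only rules (IPv4/IPv6), and a one-entry catch-all group for
 * MLX5E_TT_ANY.
 */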
#define MLX5E_TTC_NUM_GROUPS	3
#define MLX5E_TTC_GROUP1_SIZE	(BIT(3) + MLX5E_NUM_TUNNEL_TT)
#define MLX5E_TTC_GROUP2_SIZE	 BIT(1)
#define MLX5E_TTC_GROUP3_SIZE	 BIT(0)
#define MLX5E_TTC_TABLE_SIZE	(MLX5E_TTC_GROUP1_SIZE +\
				 MLX5E_TTC_GROUP2_SIZE +\
				 MLX5E_TTC_GROUP3_SIZE)

#define MLX5E_INNER_TTC_NUM_GROUPS	3
#define MLX5E_INNER_TTC_GROUP1_SIZE	BIT(3)
#define MLX5E_INNER_TTC_GROUP2_SIZE	BIT(1)
#define MLX5E_INNER_TTC_GROUP3_SIZE	BIT(0)
#define MLX5E_INNER_TTC_TABLE_SIZE	(MLX5E_INNER_TTC_GROUP1_SIZE +\
					 MLX5E_INNER_TTC_GROUP2_SIZE +\
					 MLX5E_INNER_TTC_GROUP3_SIZE)

static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc,
					 bool use_ipv)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5e_flow_table *ft = &ttc->ft;
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_TTC_NUM_GROUPS,
			sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g)
		return -ENOMEM;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		kfree(ft->g);
		return -ENOMEM;
	}

	/* L4 Group */
	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	if (use_ipv)
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_version);
	else
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_TTC_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* L3 Group */
	MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_TTC_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* Any Group */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_TTC_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	kvfree(in);

	return err;
}

static struct mlx5_flow_handle *
mlx5e_generate_inner_ttc_rule(struct mlx5e_priv *priv,
			      struct mlx5_flow_table *ft,
			      struct mlx5_flow_destination *dest,
			      u16 etype, u8 proto)
{
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 ipv;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	ipv = mlx5e_etype_to_ipv(etype);
	if (etype && ipv) {
		spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_version);
		MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_version, ipv);
	}

	if (proto) {
		spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_protocol);
		MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_protocol, proto);
	}

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
	}

	kvfree(spec);
	return err ? ERR_PTR(err) : rule;
}

static int mlx5e_generate_inner_ttc_table_rules(struct mlx5e_priv *priv)
{
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle **rules;
	struct mlx5e_ttc_table *ttc;
	struct mlx5_flow_table *ft;
	int err;
	int tt;

	ttc = &priv->fs.inner_ttc;
	ft = ttc->ft.t;
	rules = ttc->rules;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
		if (tt == MLX5E_TT_ANY)
			dest.tir_num = priv->direct_tir[0].tirn;
		else
			dest.tir_num = priv->inner_indir_tir[tt].tirn;

		rules[tt] = mlx5e_generate_inner_ttc_rule(priv, ft, &dest,
							  ttc_rules[tt].etype,
							  ttc_rules[tt].proto);
		if (IS_ERR(rules[tt]))
			goto del_rules;
	}

	return 0;

del_rules:
	err = PTR_ERR(rules[tt]);
	rules[tt] = NULL;
	mlx5e_cleanup_ttc_rules(ttc);
	return err;
}

static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5e_flow_table *ft = &ttc->ft;
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_INNER_TTC_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g)
		return -ENOMEM;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		kfree(ft->g);
		return -ENOMEM;
	}

	/* L4 Group */
	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_version);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_TTC_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* L3 Group */
	MLX5_SET(fte_match_param, mc, inner_headers.ip_protocol, 0);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_TTC_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* Any Group */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_TTC_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	kvfree(in);

	return err;
}

static int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
{
	struct mlx5e_ttc_table *ttc = &priv->fs.inner_ttc;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5e_flow_table *ft = &ttc->ft;
	int err;

	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
		return 0;

	ft_attr.max_fte = MLX5E_INNER_TTC_TABLE_SIZE;
	ft_attr.level = MLX5E_INNER_TTC_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = mlx5e_create_inner_ttc_table_groups(ttc);
	if (err)
		goto err;

	err = mlx5e_generate_inner_ttc_table_rules(priv);
	if (err)
		goto err;

	return 0;

err:
	mlx5e_destroy_flow_table(ft);
	return err;
}

static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
{
	struct mlx5e_ttc_table *ttc = &priv->fs.inner_ttc;

	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
		return;

	mlx5e_cleanup_ttc_rules(ttc);
	mlx5e_destroy_flow_table(&ttc->ft);
}

void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
{
	struct mlx5e_ttc_table *ttc = &priv->fs.ttc;

	mlx5e_cleanup_ttc_rules(ttc);
	mlx5e_destroy_flow_table(&ttc->ft);
}

int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
{
	bool match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version);
	struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5e_flow_table *ft = &ttc->ft;
	int err;

	ft_attr.max_fte = MLX5E_TTC_TABLE_SIZE;
	ft_attr.level = MLX5E_TTC_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = mlx5e_create_ttc_table_groups(ttc, match_ipv_outer);
	if (err)
		goto err;

	err = mlx5e_generate_ttc_table_rules(priv);
	if (err)
		goto err;

	return 0;
err:
	mlx5e_destroy_flow_table(ft);
	return err;
}

static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
				   struct mlx5e_l2_rule *ai)
{
	if (!IS_ERR_OR_NULL(ai->rule)) {
		mlx5_del_flow_rules(ai->rule);
		ai->rule = NULL;
	}
}

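/* Install an L2 (DMAC) steering rule pointing at the TTC table:
 * MLX5E_FULLMATCH matches the exact DMAC, MLX5E_ALLMULTI matches only
 * the multicast bit, and MLX5E_PROMISC matches everything.
 */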
static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
				  struct mlx5e_l2_rule *ai, int type)
{
	struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
	struct mlx5_flow_destination dest;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 *mc_dmac;
	u8 *mv_dmac;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			       outer_headers.dmac_47_16);
	mv_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			       outer_headers.dmac_47_16);

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fs.ttc.ft.t;

	switch (type) {
	case MLX5E_FULLMATCH:
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		eth_broadcast_addr(mc_dmac);
		ether_addr_copy(mv_dmac, ai->addr);
		break;

	case MLX5E_ALLMULTI:
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		mc_dmac[0] = 0x01;
		mv_dmac[0] = 0x01;
		break;

	case MLX5E_PROMISC:
		break;
	}

	ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(ai->rule)) {
		netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
			   __func__, mv_dmac);
		err = PTR_ERR(ai->rule);
		ai->rule = NULL;
	}

	kvfree(spec);

	return err;
}

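/* L2 flow table layout: one catch-all entry for promisc, BIT(15)
 * entries for exact DMAC matches, and one entry for the allmulti
 * (multicast-bit) match.
 */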
#define MLX5E_NUM_L2_GROUPS	   3
#define MLX5E_L2_GROUP1_SIZE	   BIT(0)
#define MLX5E_L2_GROUP2_SIZE	   BIT(15)
#define MLX5E_L2_GROUP3_SIZE	   BIT(0)
#define MLX5E_L2_TABLE_SIZE	   (MLX5E_L2_GROUP1_SIZE +\
				    MLX5E_L2_GROUP2_SIZE +\
				    MLX5E_L2_GROUP3_SIZE)
static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5e_flow_table *ft = &l2_table->ft;
	int ix = 0;
	u8 *mc_dmac;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g)
		return -ENOMEM;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		kfree(ft->g);
		return -ENOMEM;
	}

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
			       outer_headers.dmac_47_16);
	/* Flow Group for promiscuous */
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Flow Group for full match */
	eth_broadcast_addr(mc_dmac);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Flow Group for allmulti */
	eth_zero_addr(mc_dmac);
	mc_dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	kvfree(in);
	return 0;

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);
	kvfree(in);

	return err;
}

static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fs.l2.ft);
}

static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_table *l2_table = &priv->fs.l2;
	struct mlx5e_flow_table *ft = &l2_table->ft;
	struct mlx5_flow_table_attr ft_attr = {};
	int err;

	ft->num_groups = 0;

	ft_attr.max_fte = MLX5E_L2_TABLE_SIZE;
	ft_attr.level = MLX5E_L2_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = mlx5e_create_l2_table_groups(l2_table);
	if (err)
		goto err_destroy_flow_table;

	return 0;

err_destroy_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return err;
}

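/* VLAN flow table layout: BIT(12) entries matching C-tag + VID (one per
 * possible VID), two entries matching on the C-tag bit alone (the
 * untagged and any-C-tag rules), and one entry matching on the S-tag
 * bit (the any-S-tag rule).
 */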
#define MLX5E_NUM_VLAN_GROUPS	3
#define MLX5E_VLAN_GROUP0_SIZE	BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE	BIT(1)
#define MLX5E_VLAN_GROUP2_SIZE	BIT(0)
#define MLX5E_VLAN_TABLE_SIZE	(MLX5E_VLAN_GROUP0_SIZE +\
				 MLX5E_VLAN_GROUP1_SIZE +\
				 MLX5E_VLAN_GROUP2_SIZE)

static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
					    int inlen)
{
	int err;
	int ix = 0;
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return 0;

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return err;
}

static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	err = __mlx5e_create_vlan_table_groups(ft, in, inlen);

	kvfree(in);
	return err;
}

static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fs.vlan.ft;
	struct mlx5_flow_table_attr ft_attr = {};
	int err;

	ft->num_groups = 0;

	ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE;
	ft_attr.level = MLX5E_VLAN_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}
	ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_vlan_table;
	}

	err = mlx5e_create_vlan_table_groups(ft);
	if (err)
		goto err_free_g;

	mlx5e_add_vlan_rules(priv);

	return 0;

err_free_g:
	kfree(ft->g);
err_destroy_vlan_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return err;
}

static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rules(priv);
	mlx5e_destroy_flow_table(&priv->fs.vlan.ft);
}

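/* Build the RX steering pipeline bottom-up (aRFS, inner TTC, TTC, L2,
 * VLAN), since each table's rules point at a table created before it;
 * aRFS failure is non-fatal and only clears NETIF_F_NTUPLE.
 */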
int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
{
	int err;

	priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
					      MLX5_FLOW_NAMESPACE_KERNEL);

	if (!priv->fs.ns)
		return -EOPNOTSUPP;

	err = mlx5e_arfs_create_tables(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
			   err);
		priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
	}

	err = mlx5e_create_inner_ttc_table(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
			   err);
		goto err_destroy_arfs_tables;
	}

	err = mlx5e_create_ttc_table(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
			   err);
		goto err_destroy_inner_ttc_table;
	}

	err = mlx5e_create_l2_table(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create l2 table, err=%d\n",
			   err);
		goto err_destroy_ttc_table;
	}

	err = mlx5e_create_vlan_table(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create vlan table, err=%d\n",
			   err);
		goto err_destroy_l2_table;
	}

	mlx5e_ethtool_init_steering(priv);

	return 0;

err_destroy_l2_table:
	mlx5e_destroy_l2_table(priv);
err_destroy_ttc_table:
	mlx5e_destroy_ttc_table(priv);
err_destroy_inner_ttc_table:
	mlx5e_destroy_inner_ttc_table(priv);
err_destroy_arfs_tables:
	mlx5e_arfs_destroy_tables(priv);

	return err;
}

void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
{
	mlx5e_destroy_vlan_table(priv);
	mlx5e_destroy_l2_table(priv);
	mlx5e_destroy_ttc_table(priv);
	mlx5e_destroy_inner_ttc_table(priv);
	mlx5e_arfs_destroy_tables(priv);
	mlx5e_ethtool_cleanup_steering(priv);
}