/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
#include "en.h"

static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
				  struct mlx5e_l2_rule *ai, int type);
static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
				   struct mlx5e_l2_rule *ai);

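/* Within the kernel NIC priority the flow tables are chained:
 * VLAN (level 0) -> L2 (level 1) -> TTC (level 2).  Rules in each
 * table forward matching packets to the next table, and the TTC
 * rules finally steer packets to TIRs.
 */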
/* NIC prio FTS */
enum {
	MLX5E_VLAN_FT_LEVEL = 0,
	MLX5E_L2_FT_LEVEL,
	MLX5E_TTC_FT_LEVEL
};

#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)

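/* e.g. MLX5_SET_CFG(in, start_flow_index, 0) expands to
 * MLX5_SET(create_flow_group_in, in, start_flow_index, 0)
 */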
enum {
	MLX5E_FULLMATCH = 0,
	MLX5E_ALLMULTI = 1,
	MLX5E_PROMISC = 2,
};

enum {
	MLX5E_UC = 0,
	MLX5E_MC_IPV4 = 1,
	MLX5E_MC_IPV6 = 2,
	MLX5E_MC_OTHER = 3,
};

enum {
	MLX5E_ACTION_NONE = 0,
	MLX5E_ACTION_ADD = 1,
	MLX5E_ACTION_DEL = 2,
};

struct mlx5e_l2_hash_node {
	struct hlist_node hlist;
	u8 action;
	struct mlx5e_l2_rule ai;
};

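/* Hash L2 entries on the last byte of the MAC address */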
static inline int mlx5e_hash_l2(u8 *addr)
{
	return addr[5];
}

static void mlx5e_add_l2_to_hash(struct hlist_head *hash, u8 *addr)
{
	struct mlx5e_l2_hash_node *hn;
	int ix = mlx5e_hash_l2(addr);
	int found = 0;

	hlist_for_each_entry(hn, &hash[ix], hlist)
		if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
			found = 1;
			break;
		}

	if (found) {
		hn->action = MLX5E_ACTION_NONE;
		return;
	}

	hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
	if (!hn)
		return;

	ether_addr_copy(hn->ai.addr, addr);
	hn->action = MLX5E_ACTION_ADD;

	hlist_add_head(&hn->hlist, &hash[ix]);
}

static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn)
{
	hlist_del(&hn->hlist);
	kfree(hn);
}

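/* Push the list of active VLANs into the NIC vport context, so that
 * the device (e.g. the eswitch, when SR-IOV is enabled) knows which
 * VLANs this vport accepts.  If the list exceeds the firmware limit,
 * the excess VLANs are dropped from the vport list with a warning.
 */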
static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
{
	struct net_device *ndev = priv->netdev;
	int max_list_size;
	int list_size;
	u16 *vlans;
	int vlan;
	int err;
	int i;

	list_size = 0;
	for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID)
		list_size++;

	max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);

	if (list_size > max_list_size) {
		netdev_warn(ndev,
			    "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
			    list_size, max_list_size);
		list_size = max_list_size;
	}

	vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
	if (!vlans)
		return -ENOMEM;

	i = 0;
	for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID) {
		if (i >= list_size)
			break;
		vlans[i++] = vlan;
	}

	err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
	if (err)
		netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n",
			   err);

	kfree(vlans);
	return err;
}

enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,
	MLX5E_VLAN_RULE_TYPE_ANY_VID,
	MLX5E_VLAN_RULE_TYPE_MATCH_VID,
};

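/* Add one rule to the VLAN table; every VLAN rule forwards to the L2
 * table.  UNTAGGED matches vlan_tag == 0, ANY_VID matches any tagged
 * packet (vlan_tag == 1), and MATCH_VID additionally matches
 * first_vid against the given VID.
 */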
static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
				 enum mlx5e_vlan_rule_type rule_type,
				 u16 vid, u32 *mc, u32 *mv)
{
	struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
	struct mlx5_flow_destination dest;
	u8 match_criteria_enable = 0;
	struct mlx5_flow_rule **rule_p;
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fs.l2.ft.t;

	match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);

	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		rule_p = &priv->fs.vlan.untagged_rule;
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_VID:
		rule_p = &priv->fs.vlan.any_vlan_rule;
		MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1);
		break;
	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
		rule_p = &priv->fs.vlan.active_vlans_rule[vid];
		MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
		MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid);
		break;
	}

	*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
				     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
				     MLX5_FS_DEFAULT_FLOW_TAG,
				     &dest);

	if (IS_ERR(*rule_p)) {
		err = PTR_ERR(*rule_p);
		*rule_p = NULL;
		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
	}

	return err;
}

static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
			       enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	u32 *match_criteria;
	u32 *match_value;
	int err = 0;

	match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (!match_value || !match_criteria) {
		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
		err = -ENOMEM;
		goto add_vlan_rule_out;
	}

	if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_VID)
		mlx5e_vport_context_update_vlans(priv);

	err = __mlx5e_add_vlan_rule(priv, rule_type, vid, match_criteria,
				    match_value);

add_vlan_rule_out:
	kvfree(match_criteria);
	kvfree(match_value);

	return err;
}

static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
				enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		if (priv->fs.vlan.untagged_rule) {
			mlx5_del_flow_rule(priv->fs.vlan.untagged_rule);
			priv->fs.vlan.untagged_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_VID:
		if (priv->fs.vlan.any_vlan_rule) {
			mlx5_del_flow_rule(priv->fs.vlan.any_vlan_rule);
			priv->fs.vlan.any_vlan_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
		mlx5e_vport_context_update_vlans(priv);
		if (priv->fs.vlan.active_vlans_rule[vid]) {
			mlx5_del_flow_rule(priv->fs.vlan.active_vlans_rule[vid]);
			priv->fs.vlan.active_vlans_rule[vid] = NULL;
		}
		mlx5e_vport_context_update_vlans(priv);
		break;
	}
}

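/* Enabling the VLAN filter removes the any-VID rule, so only untagged
 * packets and explicitly added VIDs get through; disabling it installs
 * the any-VID rule to accept all tagged packets.  In promiscuous mode
 * the any-VID rule is owned by the rx mode work, so it is left alone.
 */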
void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
{
	if (!priv->fs.vlan.filter_disabled)
		return;

	priv->fs.vlan.filter_disabled = false;
	if (priv->netdev->flags & IFF_PROMISC)
		return;
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
}

void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
{
	if (priv->fs.vlan.filter_disabled)
		return;

	priv->fs.vlan.filter_disabled = true;
	if (priv->netdev->flags & IFF_PROMISC)
		return;
	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
}

int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
			  u16 vid)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	set_bit(vid, priv->fs.vlan.active_vlans);

	return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
}

int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
			   u16 vid)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	clear_bit(vid, priv->fs.vlan.active_vlans);

	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);

	return 0;
}

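/* Iterate over all nodes in an L2 address hash table; safe against
 * removal of the current node.
 */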
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)

static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
				    struct mlx5e_l2_hash_node *hn)
{
	switch (hn->action) {
	case MLX5E_ACTION_ADD:
		mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH);
		hn->action = MLX5E_ACTION_NONE;
		break;

	case MLX5E_ACTION_DEL:
		mlx5e_del_l2_flow_rule(priv, &hn->ai);
		mlx5e_del_l2_from_hash(hn);
		break;
	}
}

static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(netdev);

	mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc,
			     priv->netdev->dev_addr);

	netdev_for_each_uc_addr(ha, netdev)
		mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr);

	netdev_for_each_mc_addr(ha, netdev)
		mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr);

	netif_addr_unlock_bh(netdev);
}

static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
				  u8 addr_array[][ETH_ALEN], int size)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct net_device *ndev = priv->netdev;
	struct mlx5e_l2_hash_node *hn;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int i = 0;
	int hi;

	addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;

	if (is_uc) /* Make sure our own address is pushed first */
		ether_addr_copy(addr_array[i++], ndev->dev_addr);
	else if (priv->fs.l2.broadcast_enabled)
		ether_addr_copy(addr_array[i++], ndev->broadcast);

	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
		if (ether_addr_equal(ndev->dev_addr, hn->ai.addr))
			continue;
		if (i >= size)
			break;
		ether_addr_copy(addr_array[i++], hn->ai.addr);
	}
}

static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
						 int list_type)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct mlx5e_l2_hash_node *hn;
	u8 (*addr_array)[ETH_ALEN] = NULL;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int max_size;
	int size;
	int err;
	int hi;

	size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0);
	max_size = is_uc ?
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);

	addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
		size++;

	if (size > max_size) {
		netdev_warn(priv->netdev,
			    "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
			    is_uc ? "UC" : "MC", size, max_size);
		size = max_size;
	}

	if (size) {
		addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
		if (!addr_array) {
			err = -ENOMEM;
			goto out;
		}
		mlx5e_fill_addr_array(priv, list_type, addr_array, size);
	}

	err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
out:
	if (err)
		netdev_err(priv->netdev,
			   "Failed to modify vport %s list err(%d)\n",
			   is_uc ? "UC" : "MC", err);
	kfree(addr_array);
}

static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_table *ea = &priv->fs.l2;

	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
	mlx5_modify_nic_vport_promisc(priv->mdev, 0,
				      ea->allmulti_enabled,
				      ea->promisc_enabled);
}

static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
		mlx5e_execute_l2_action(priv, hn);

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
		mlx5e_execute_l2_action(priv, hn);
}

static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
		hn->action = MLX5E_ACTION_DEL;
	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
		hn->action = MLX5E_ACTION_DEL;

	if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
		mlx5e_sync_netdev_addr(priv);

	mlx5e_apply_netdev_addr(priv);
}

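/* Reconcile the hardware steering state with the netdev rx mode: new
 * catch-all rules (promisc/allmulti/broadcast) are installed before
 * the per-address sync and stale ones are removed only afterwards, so
 * there is no window in which matching traffic has no rule.
 */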
void mlx5e_set_rx_mode_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       set_rx_mode_work);

	struct mlx5e_l2_table *ea = &priv->fs.l2;
	struct net_device *ndev = priv->netdev;

	bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
	bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC);
	bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
	bool broadcast_enabled = rx_mode_enable;

	bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
	bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
	bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
	bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
	bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
	bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;

	if (enable_promisc) {
		mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC);
		if (!priv->fs.vlan.filter_disabled)
			mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
					    0);
	}
	if (enable_allmulti)
		mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
	if (enable_broadcast)
		mlx5e_add_l2_flow_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

	mlx5e_handle_netdev_addr(priv);

	if (disable_broadcast)
		mlx5e_del_l2_flow_rule(priv, &ea->broadcast);
	if (disable_allmulti)
		mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
	if (disable_promisc) {
		if (!priv->fs.vlan.filter_disabled)
			mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
					    0);
		mlx5e_del_l2_flow_rule(priv, &ea->promisc);
	}

	ea->promisc_enabled = promisc_enabled;
	ea->allmulti_enabled = allmulti_enabled;
	ea->broadcast_enabled = broadcast_enabled;

	mlx5e_vport_context_update(priv);
}

static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
{
	int i;

	for (i = ft->num_groups - 1; i >= 0; i--) {
		if (!IS_ERR_OR_NULL(ft->g[i]))
			mlx5_destroy_flow_group(ft->g[i]);
		ft->g[i] = NULL;
	}
	ft->num_groups = 0;
}

void mlx5e_init_l2_addr(struct mlx5e_priv *priv)
{
	ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast);
}

static void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
	mlx5e_destroy_groups(ft);
	kfree(ft->g);
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;
}

static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc)
{
	int i;

	for (i = 0; i < MLX5E_NUM_TT; i++) {
		if (!IS_ERR_OR_NULL(ttc->rules[i])) {
			mlx5_del_flow_rule(ttc->rules[i]);
			ttc->rules[i] = NULL;
		}
	}
}

static struct {
	u16 etype;
	u8 proto;
} ttc_rules[] = {
	[MLX5E_TT_IPV4_TCP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_TCP,
	},
	[MLX5E_TT_IPV6_TCP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_TCP,
	},
	[MLX5E_TT_IPV4_UDP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_UDP,
	},
	[MLX5E_TT_IPV6_UDP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_UDP,
	},
	[MLX5E_TT_IPV4_IPSEC_AH] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_AH,
	},
	[MLX5E_TT_IPV6_IPSEC_AH] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_AH,
	},
	[MLX5E_TT_IPV4_IPSEC_ESP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_ESP,
	},
	[MLX5E_TT_IPV6_IPSEC_ESP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_ESP,
	},
	[MLX5E_TT_IPV4] = {
		.etype = ETH_P_IP,
		.proto = 0,
	},
	[MLX5E_TT_IPV6] = {
		.etype = ETH_P_IPV6,
		.proto = 0,
	},
	[MLX5E_TT_ANY] = {
		.etype = 0,
		.proto = 0,
	},
};

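/* Build a single TTC rule matching on ethertype and/or IP protocol
 * (either may be 0, meaning "don't care") and forwarding to the given
 * destination (a TIR).
 */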
static struct mlx5_flow_rule *mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
						      struct mlx5_flow_table *ft,
						      struct mlx5_flow_destination *dest,
						      u16 etype,
						      u8 proto)
{
	struct mlx5_flow_rule *rule;
	u8 match_criteria_enable = 0;
	u32 *match_criteria;
	u32 *match_value;
	int err = 0;

	match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (!match_value || !match_criteria) {
		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
		err = -ENOMEM;
		goto out;
	}

	if (proto) {
		match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.ip_protocol);
		MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol, proto);
	}
	if (etype) {
		match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.ethertype);
		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, etype);
	}

	rule = mlx5_add_flow_rule(ft, match_criteria_enable,
				  match_criteria, match_value,
				  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
				  MLX5_FS_DEFAULT_FLOW_TAG,
				  dest);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
	}
out:
	kvfree(match_criteria);
	kvfree(match_value);
	return err ? ERR_PTR(err) : rule;
}

static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
{
	struct mlx5_flow_destination dest;
	struct mlx5e_ttc_table *ttc;
	struct mlx5_flow_rule **rules;
	struct mlx5_flow_table *ft;
	int tt;
	int err;

	ttc = &priv->fs.ttc;
	ft = ttc->ft.t;
	rules = ttc->rules;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
		if (tt == MLX5E_TT_ANY)
			dest.tir_num = priv->direct_tir[0].tirn;
		else
			dest.tir_num = priv->indir_tirn[tt];
		rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
						    ttc_rules[tt].etype,
						    ttc_rules[tt].proto);
		if (IS_ERR(rules[tt]))
			goto del_rules;
	}

	return 0;

del_rules:
	err = PTR_ERR(rules[tt]);
	rules[tt] = NULL;
	mlx5e_cleanup_ttc_rules(ttc);
	return err;
}

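/* TTC flow groups: group 1 (8 entries) matches ethertype + ip_protocol
 * for the L4 rules, group 2 (2 entries) matches ethertype only for the
 * plain IPv4/IPv6 rules, and group 3 (1 entry) holds the catch-all.
 */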
#define MLX5E_TTC_NUM_GROUPS	3
#define MLX5E_TTC_GROUP1_SIZE	BIT(3)
#define MLX5E_TTC_GROUP2_SIZE	BIT(1)
#define MLX5E_TTC_GROUP3_SIZE	BIT(0)
#define MLX5E_TTC_TABLE_SIZE	(MLX5E_TTC_GROUP1_SIZE +\
				 MLX5E_TTC_GROUP2_SIZE +\
				 MLX5E_TTC_GROUP3_SIZE)
static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5e_flow_table *ft = &ttc->ft;
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_TTC_NUM_GROUPS,
			sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g)
		return -ENOMEM;
	in = mlx5_vzalloc(inlen);
	if (!in) {
		kfree(ft->g);
		return -ENOMEM;
	}

	/* L4 Group */
	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_TTC_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* L3 Group */
	MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_TTC_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* Any Group */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_TTC_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	kvfree(in);

	return err;
}

static void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
{
	struct mlx5e_ttc_table *ttc = &priv->fs.ttc;

	mlx5e_cleanup_ttc_rules(ttc);
	mlx5e_destroy_flow_table(&ttc->ft);
}

static int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
{
	struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
	struct mlx5e_flow_table *ft = &ttc->ft;
	int err;

	ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
				       MLX5E_TTC_TABLE_SIZE, MLX5E_TTC_FT_LEVEL);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = mlx5e_create_ttc_table_groups(ttc);
	if (err)
		goto err;

	err = mlx5e_generate_ttc_table_rules(priv);
	if (err)
		goto err;

	return 0;
err:
	mlx5e_destroy_flow_table(ft);
	return err;
}

static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
				   struct mlx5e_l2_rule *ai)
{
	if (!IS_ERR_OR_NULL(ai->rule)) {
		mlx5_del_flow_rule(ai->rule);
		ai->rule = NULL;
	}
}

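/* Install an L2 (DMAC) rule forwarding to the TTC table: FULLMATCH
 * matches the exact MAC, ALLMULTI matches any DMAC with the multicast
 * bit set, and PROMISC matches everything.
 */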
static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
				  struct mlx5e_l2_rule *ai, int type)
{
	struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
	struct mlx5_flow_destination dest;
	u8 match_criteria_enable = 0;
	u32 *match_criteria;
	u32 *match_value;
	int err = 0;
	u8 *mc_dmac;
	u8 *mv_dmac;

	match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (!match_value || !match_criteria) {
		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
		err = -ENOMEM;
		goto add_l2_rule_out;
	}

	mc_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			       outer_headers.dmac_47_16);
	mv_dmac = MLX5_ADDR_OF(fte_match_param, match_value,
			       outer_headers.dmac_47_16);

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fs.ttc.ft.t;

	switch (type) {
	case MLX5E_FULLMATCH:
		match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		eth_broadcast_addr(mc_dmac);
		ether_addr_copy(mv_dmac, ai->addr);
		break;

	case MLX5E_ALLMULTI:
		match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		mc_dmac[0] = 0x01;
		mv_dmac[0] = 0x01;
		break;

	case MLX5E_PROMISC:
		break;
	}

	ai->rule = mlx5_add_flow_rule(ft, match_criteria_enable, match_criteria,
				      match_value,
				      MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
				      MLX5_FS_DEFAULT_FLOW_TAG, &dest);
	if (IS_ERR(ai->rule)) {
		netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
			   __func__, mv_dmac);
		err = PTR_ERR(ai->rule);
		ai->rule = NULL;
	}

add_l2_rule_out:
	kvfree(match_criteria);
	kvfree(match_value);

	return err;
}

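/* L2 flow groups: 1 entry for the promiscuous rule (no match
 * criteria), 32K entries for exact DMAC matches, and 1 entry for the
 * allmulti rule, which matches only the multicast bit of the DMAC.
 */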
#define MLX5E_NUM_L2_GROUPS	3
#define MLX5E_L2_GROUP1_SIZE	BIT(0)
#define MLX5E_L2_GROUP2_SIZE	BIT(15)
#define MLX5E_L2_GROUP3_SIZE	BIT(0)
#define MLX5E_L2_TABLE_SIZE	(MLX5E_L2_GROUP1_SIZE +\
				 MLX5E_L2_GROUP2_SIZE +\
				 MLX5E_L2_GROUP3_SIZE)
static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5e_flow_table *ft = &l2_table->ft;
	int ix = 0;
	u8 *mc_dmac;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g)
		return -ENOMEM;
	in = mlx5_vzalloc(inlen);
	if (!in) {
		kfree(ft->g);
		return -ENOMEM;
	}

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
			       outer_headers.dmac_47_16);
	/* Flow Group for promiscuous */
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Flow Group for full match */
	eth_broadcast_addr(mc_dmac);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Flow Group for allmulti */
	eth_zero_addr(mc_dmac);
	mc_dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	kvfree(in);
	return 0;

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);
	kvfree(in);

	return err;
}

static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fs.l2.ft);
}

static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_table *l2_table = &priv->fs.l2;
	struct mlx5e_flow_table *ft = &l2_table->ft;
	int err;

	ft->num_groups = 0;
	ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
				       MLX5E_L2_TABLE_SIZE, MLX5E_L2_FT_LEVEL);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = mlx5e_create_l2_table_groups(l2_table);
	if (err)
		goto err_destroy_flow_table;

	return 0;

err_destroy_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return err;
}

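/* VLAN flow groups: 4K entries for the per-VID rules (vlan_tag +
 * first_vid) and 2 entries for the untagged and any-VID rules, which
 * match on vlan_tag alone.
 */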
#define MLX5E_NUM_VLAN_GROUPS	2
#define MLX5E_VLAN_GROUP0_SIZE	BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE	BIT(1)
#define MLX5E_VLAN_TABLE_SIZE	(MLX5E_VLAN_GROUP0_SIZE +\
				 MLX5E_VLAN_GROUP1_SIZE)

static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
					    int inlen)
{
	int err;
	int ix = 0;
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return 0;

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return err;
}

static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	err = __mlx5e_create_vlan_table_groups(ft, in, inlen);

	kvfree(in);
	return err;
}

static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fs.vlan.ft;
	int err;

	ft->num_groups = 0;
	ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
				       MLX5E_VLAN_TABLE_SIZE, MLX5E_VLAN_FT_LEVEL);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}
	ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_vlan_table;
	}

	err = mlx5e_create_vlan_table_groups(ft);
	if (err)
		goto err_free_g;

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
	if (err)
		goto err_destroy_vlan_flow_groups;

	return 0;

err_destroy_vlan_flow_groups:
	mlx5e_destroy_groups(ft);
err_free_g:
	kfree(ft->g);
err_destroy_vlan_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return err;
}

static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fs.vlan.ft);
}

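/* Note the creation order: TTC first, then L2, then VLAN.  Each table
 * must exist before the rules of the table preceding it in the packet
 * path can be pointed at it.
 */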
int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
{
	int err;

	priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
					      MLX5_FLOW_NAMESPACE_KERNEL);

	if (!priv->fs.ns)
		return -EINVAL;

	err = mlx5e_create_ttc_table(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
			   err);
		return err;
	}

	err = mlx5e_create_l2_table(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create l2 table, err=%d\n",
			   err);
		goto err_destroy_ttc_table;
	}

	err = mlx5e_create_vlan_table(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create vlan table, err=%d\n",
			   err);
		goto err_destroy_l2_table;
	}

	return 0;

err_destroy_l2_table:
	mlx5e_destroy_l2_table(priv);
err_destroy_ttc_table:
	mlx5e_destroy_ttc_table(priv);

	return err;
}

void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
	mlx5e_destroy_vlan_table(priv);
	mlx5e_destroy_l2_table(priv);
	mlx5e_destroy_ttc_table(priv);
}