/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
#include "en.h"
#include "lib/mpfs.h"
static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
				  struct mlx5e_l2_rule *ai, int type);
static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
				   struct mlx5e_l2_rule *ai);
enum {
	MLX5E_ACTION_NONE = 0,
	MLX5E_ACTION_ADD,
	MLX5E_ACTION_DEL,
};

struct mlx5e_l2_hash_node {
	struct hlist_node hlist;
	u8                action;
	struct mlx5e_l2_rule ai;
	bool   mpfs;
};
static inline int mlx5e_hash_l2(u8 *addr)
{
	return addr[5];
}
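/*
 * Bucket-math sketch for the hash above (illustrative, not driver code):
 * the hash is simply the low byte of the MAC, so 00:11:22:33:44:55 lands
 * in bucket 0x55. Assuming MLX5E_L2_ADDR_HASH_SIZE is 256 (one bucket per
 * possible last byte), MACs differing only in their final byte spread
 * perfectly across the table.
 */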
static void mlx5e_add_l2_to_hash(struct hlist_head *hash, u8 *addr)
{
	struct mlx5e_l2_hash_node *hn;
	int ix = mlx5e_hash_l2(addr);
	int found = 0;

	hlist_for_each_entry(hn, &hash[ix], hlist)
		if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
			found = 1;
			break;
		}

	if (found) {
		hn->action = MLX5E_ACTION_NONE;
		return;
	}

	hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
	if (!hn)
		return;

	ether_addr_copy(hn->ai.addr, addr);
	hn->action = MLX5E_ACTION_ADD;

	hlist_add_head(&hn->hlist, &hash[ix]);
}
static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn)
{
	hlist_del(&hn->hlist);
	kfree(hn);
}
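/*
 * The two helpers above implement a small reconciliation state machine:
 * an address already in the hash is re-marked MLX5E_ACTION_NONE (keep),
 * a new one is inserted as MLX5E_ACTION_ADD, and anything still marked
 * MLX5E_ACTION_DEL by mlx5e_handle_netdev_addr() below is torn down and
 * freed. A rough lifecycle sketch (not driver code):
 *
 *	hn->action = MLX5E_ACTION_DEL;     // assume stale
 *	mlx5e_add_l2_to_hash(hash, addr);  // seen again -> ACTION_NONE
 *	mlx5e_execute_l2_action(priv, hn); // ADD/DEL applied, NONE skipped
 */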
static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
{
	struct net_device *ndev = priv->netdev;
	int max_list_size;
	int list_size;
	u16 *vlans;
	int vlan;
	int err;
	int i;

	list_size = 0;
	for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID)
		list_size++;

	max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);

	if (list_size > max_list_size) {
		netdev_warn(ndev,
			    "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
			    list_size, max_list_size);
		list_size = max_list_size;
	}

	vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
	if (!vlans)
		return -ENOMEM;

	i = 0;
	for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID) {
		if (i >= list_size)
			break;
		vlans[i++] = vlan;
	}

	err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
	if (err)
		netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n",
			   err);

	kfree(vlans);
	return err;
}
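/*
 * Worked example of the capacity clamp above (values illustrative): with
 * log_max_vlan_list == 7 the firmware accepts 1 << 7 == 128 VIDs, so a
 * netdev with 200 active VLANs gets only its first 128 pushed into the
 * vport context and the warning is logged for the remainder.
 */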
enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,
	MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
	MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
	MLX5E_VLAN_RULE_TYPE_MATCH_VID,
};
static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
				 enum mlx5e_vlan_rule_type rule_type,
				 u16 vid, struct mlx5_flow_spec *spec)
{
	struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle **rule_p;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fs.l2.ft.t;

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		rule_p = &priv->fs.vlan.untagged_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		rule_p = &priv->fs.vlan.any_cvlan_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		rule_p = &priv->fs.vlan.any_svlan_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.svlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
		break;
	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
		rule_p = &priv->fs.vlan.active_vlans_rule[vid];
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
			 vid);
		break;
	}

	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);

	if (IS_ERR(*rule_p)) {
		err = PTR_ERR(*rule_p);
		*rule_p = NULL;
		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
	}

	return err;
}
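/*
 * Matching sketch for the switch above, assuming the standard fte match
 * layout: cvlan_tag/svlan_tag are single-bit "packet carries a C/S-tag"
 * fields and first_vid is the 12-bit VID of the outermost tag. For
 * MLX5E_VLAN_RULE_TYPE_MATCH_VID with vid == 100 the spec amounts to:
 *
 *	criteria: cvlan_tag = 1, first_vid = 0xfff  (mask all 12 bits)
 *	value:    cvlan_tag = 1, first_vid = 100
 *
 * while MLX5E_VLAN_RULE_TYPE_UNTAGGED masks cvlan_tag but leaves the
 * value at 0, so only untagged frames hit that rule.
 */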
static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
			       enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_VID)
		mlx5e_vport_context_update_vlans(priv);

	err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec);

	kvfree(spec);

	return err;
}
static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
				enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		if (priv->fs.vlan.untagged_rule) {
			mlx5_del_flow_rules(priv->fs.vlan.untagged_rule);
			priv->fs.vlan.untagged_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		if (priv->fs.vlan.any_cvlan_rule) {
			mlx5_del_flow_rules(priv->fs.vlan.any_cvlan_rule);
			priv->fs.vlan.any_cvlan_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		if (priv->fs.vlan.any_svlan_rule) {
			mlx5_del_flow_rules(priv->fs.vlan.any_svlan_rule);
			priv->fs.vlan.any_svlan_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
		mlx5e_vport_context_update_vlans(priv);
		if (priv->fs.vlan.active_vlans_rule[vid]) {
			mlx5_del_flow_rules(priv->fs.vlan.active_vlans_rule[vid]);
			priv->fs.vlan.active_vlans_rule[vid] = NULL;
		}
		mlx5e_vport_context_update_vlans(priv);
		break;
	}
}
static void mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}
static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	if (err)
		return err;

	return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}
void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
{
	if (!priv->fs.vlan.filter_disabled)
		return;

	priv->fs.vlan.filter_disabled = false;
	if (priv->netdev->flags & IFF_PROMISC)
		return;
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}
void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
{
	if (priv->fs.vlan.filter_disabled)
		return;

	priv->fs.vlan.filter_disabled = true;
	if (priv->netdev->flags & IFF_PROMISC)
		return;
	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}
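/*
 * Note the IFF_PROMISC early-outs in the two helpers above: while the
 * netdev is promiscuous, mlx5e_set_rx_mode_work() has already installed
 * the any-VID rules, so toggling rx-vlan-filter must not add or remove
 * them a second time; only the filter_disabled bookkeeping changes.
 */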
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
			  u16 vid)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	set_bit(vid, priv->fs.vlan.active_vlans);

	return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
}
int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
			   u16 vid)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	clear_bit(vid, priv->fs.vlan.active_vlans);

	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);

	return 0;
}
static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
{
	int i;

	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(i, priv->fs.vlan.active_vlans, VLAN_N_VID) {
		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
	}

	if (priv->fs.vlan.filter_disabled &&
	    !(priv->netdev->flags & IFF_PROMISC))
		mlx5e_add_any_vid_rules(priv);
}
static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
{
	int i;

	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(i, priv->fs.vlan.active_vlans, VLAN_N_VID) {
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
	}

	if (priv->fs.vlan.filter_disabled &&
	    !(priv->netdev->flags & IFF_PROMISC))
		mlx5e_del_any_vid_rules(priv);
}
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
				    struct mlx5e_l2_hash_node *hn)
{
	u8 action = hn->action;
	u8 mac_addr[ETH_ALEN];
	int l2_err = 0;

	/* Copy the MAC out of the node: on MLX5E_ACTION_DEL the node is
	 * freed below, so the warning must not dereference it afterwards.
	 */
	ether_addr_copy(mac_addr, hn->ai.addr);

	switch (action) {
	case MLX5E_ACTION_ADD:
		mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH);
		if (!is_multicast_ether_addr(mac_addr)) {
			l2_err = mlx5_mpfs_add_mac(priv->mdev, mac_addr);
			hn->mpfs = !l2_err;
		}
		hn->action = MLX5E_ACTION_NONE;
		break;

	case MLX5E_ACTION_DEL:
		if (!is_multicast_ether_addr(mac_addr) && hn->mpfs)
			l2_err = mlx5_mpfs_del_mac(priv->mdev, mac_addr);
		mlx5e_del_l2_flow_rule(priv, &hn->ai);
		mlx5e_del_l2_from_hash(hn);
		break;
	}

	if (l2_err)
		netdev_warn(priv->netdev, "MPFS, failed to %s mac %pM, err(%d)\n",
			    action == MLX5E_ACTION_ADD ? "add" : "del",
			    mac_addr, l2_err);
}
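/*
 * MPFS (Multi Physical Function Switch) is the device-level L2 table
 * shared by all functions on the port: unicast MACs must be registered
 * there in addition to this function's own L2 flow table, which is why
 * the ADD path pairs mlx5e_add_l2_flow_rule() with mlx5_mpfs_add_mac()
 * and records hn->mpfs so the DEL path knows whether a matching
 * mlx5_mpfs_del_mac() is owed. Multicast is flooded by the embedded
 * switch and skips MPFS entirely.
 */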
static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(netdev);

	mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc,
			     priv->netdev->dev_addr);

	netdev_for_each_uc_addr(ha, netdev)
		mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr);

	netdev_for_each_mc_addr(ha, netdev)
		mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr);

	netif_addr_unlock_bh(netdev);
}
static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
				  u8 addr_array[][ETH_ALEN], int size)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct net_device *ndev = priv->netdev;
	struct mlx5e_l2_hash_node *hn;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int i = 0;
	int hi;

	addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;

	if (is_uc) /* Make sure our own address is pushed first */
		ether_addr_copy(addr_array[i++], ndev->dev_addr);
	else if (priv->fs.l2.broadcast_enabled)
		ether_addr_copy(addr_array[i++], ndev->broadcast);

	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
		if (ether_addr_equal(ndev->dev_addr, hn->ai.addr))
			continue;
		if (i >= size)
			break;
		ether_addr_copy(addr_array[i++], hn->ai.addr);
	}
}
static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
						 int list_type)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct mlx5e_l2_hash_node *hn;
	u8 (*addr_array)[ETH_ALEN] = NULL;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int max_size;
	int size;
	int err;
	int hi;

	size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0);
	max_size = is_uc ?
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);

	addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
		size++;

	if (size > max_size) {
		netdev_warn(priv->netdev,
			    "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
			    is_uc ? "UC" : "MC", size, max_size);
		size = max_size;
	}

	if (size) {
		addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
		if (!addr_array) {
			err = -ENOMEM;
			goto out;
		}
		mlx5e_fill_addr_array(priv, list_type, addr_array, size);
	}

	err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
out:
	if (err)
		netdev_err(priv->netdev,
			   "Failed to modify vport %s list err(%d)\n",
			   is_uc ? "UC" : "MC", err);
	kfree(addr_array);
}
static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_table *ea = &priv->fs.l2;

	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
	mlx5_modify_nic_vport_promisc(priv->mdev, 0,
				      ea->allmulti_enabled,
				      ea->promisc_enabled);
}
static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
		mlx5e_execute_l2_action(priv, hn);

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
		mlx5e_execute_l2_action(priv, hn);
}
static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
		hn->action = MLX5E_ACTION_DEL;
	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
		hn->action = MLX5E_ACTION_DEL;

	if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
		mlx5e_sync_netdev_addr(priv);

	mlx5e_apply_netdev_addr(priv);
}
void mlx5e_set_rx_mode_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       set_rx_mode_work);

	struct mlx5e_l2_table *ea = &priv->fs.l2;
	struct net_device *ndev = priv->netdev;

	bool rx_mode_enable   = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
	bool promisc_enabled   = rx_mode_enable && (ndev->flags & IFF_PROMISC);
	bool allmulti_enabled  = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
	bool broadcast_enabled = rx_mode_enable;

	bool enable_promisc    = !ea->promisc_enabled   &&  promisc_enabled;
	bool disable_promisc   =  ea->promisc_enabled   && !promisc_enabled;
	bool enable_allmulti   = !ea->allmulti_enabled  &&  allmulti_enabled;
	bool disable_allmulti  =  ea->allmulti_enabled  && !allmulti_enabled;
	bool enable_broadcast  = !ea->broadcast_enabled &&  broadcast_enabled;
	bool disable_broadcast =  ea->broadcast_enabled && !broadcast_enabled;

	if (enable_promisc) {
		mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC);
		if (!priv->fs.vlan.filter_disabled)
			mlx5e_add_any_vid_rules(priv);
	}
	if (enable_allmulti)
		mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
	if (enable_broadcast)
		mlx5e_add_l2_flow_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

	mlx5e_handle_netdev_addr(priv);

	if (disable_broadcast)
		mlx5e_del_l2_flow_rule(priv, &ea->broadcast);
	if (disable_allmulti)
		mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
	if (disable_promisc) {
		if (!priv->fs.vlan.filter_disabled)
			mlx5e_del_any_vid_rules(priv);
		mlx5e_del_l2_flow_rule(priv, &ea->promisc);
	}

	ea->promisc_enabled   = promisc_enabled;
	ea->allmulti_enabled  = allmulti_enabled;
	ea->broadcast_enabled = broadcast_enabled;

	mlx5e_vport_context_update(priv);
}
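/*
 * State-diff sketch for the work handler above: each enable/disable pair
 * is the edge between the cached state in ea and the freshly read netdev
 * flags. E.g. promisc just turned on yields
 *
 *	enable_promisc  = !ea->promisc_enabled && promisc_enabled;  // true
 *	disable_promisc =  ea->promisc_enabled && !promisc_enabled; // false
 *
 * so rules are only added or removed on transitions, never re-installed
 * on every invocation of the work.
 */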
static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
{
	int i;

	for (i = ft->num_groups - 1; i >= 0; i--) {
		if (!IS_ERR_OR_NULL(ft->g[i]))
			mlx5_destroy_flow_group(ft->g[i]);
		ft->g[i] = NULL;
	}
	ft->num_groups = 0;
}
void mlx5e_init_l2_addr(struct mlx5e_priv *priv)
{
	ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast);
}
void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
	mlx5e_destroy_groups(ft);
	kfree(ft->g);
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;
}
static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc)
{
	int i;

	for (i = 0; i < MLX5E_NUM_TT; i++) {
		if (!IS_ERR_OR_NULL(ttc->rules[i])) {
			mlx5_del_flow_rules(ttc->rules[i]);
			ttc->rules[i] = NULL;
		}
	}

	for (i = 0; i < MLX5E_NUM_TUNNEL_TT; i++) {
		if (!IS_ERR_OR_NULL(ttc->tunnel_rules[i])) {
			mlx5_del_flow_rules(ttc->tunnel_rules[i]);
			ttc->tunnel_rules[i] = NULL;
		}
	}
}
struct mlx5e_etype_proto {
	u16 etype;
	u8 proto;
};
static struct mlx5e_etype_proto ttc_rules[] = {
	[MLX5E_TT_IPV4_TCP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_TCP,
	},
	[MLX5E_TT_IPV6_TCP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_TCP,
	},
	[MLX5E_TT_IPV4_UDP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_UDP,
	},
	[MLX5E_TT_IPV6_UDP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_UDP,
	},
	[MLX5E_TT_IPV4_IPSEC_AH] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_AH,
	},
	[MLX5E_TT_IPV6_IPSEC_AH] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_AH,
	},
	[MLX5E_TT_IPV4_IPSEC_ESP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_ESP,
	},
	[MLX5E_TT_IPV6_IPSEC_ESP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_ESP,
	},
};
static struct mlx5e_etype_proto ttc_tunnel_rules[] = {
	[MLX5E_TT_IPV4_GRE] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_GRE,
	},
	[MLX5E_TT_IPV6_GRE] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_GRE,
	},
};
static u8 mlx5e_etype_to_ipv(u16 ethertype)
{
	if (ethertype == ETH_P_IP)
		return 4;

	if (ethertype == ETH_P_IPV6)
		return 6;

	return 0;
}
*
695 mlx5e_generate_ttc_rule(struct mlx5e_priv
*priv
,
696 struct mlx5_flow_table
*ft
,
697 struct mlx5_flow_destination
*dest
,
701 int match_ipv_outer
= MLX5_CAP_FLOWTABLE_NIC_RX(priv
->mdev
, ft_field_support
.outer_ip_version
);
702 MLX5_DECLARE_FLOW_ACT(flow_act
);
703 struct mlx5_flow_handle
*rule
;
704 struct mlx5_flow_spec
*spec
;
708 spec
= kvzalloc(sizeof(*spec
), GFP_KERNEL
);
710 return ERR_PTR(-ENOMEM
);
713 spec
->match_criteria_enable
= MLX5_MATCH_OUTER_HEADERS
;
714 MLX5_SET_TO_ONES(fte_match_param
, spec
->match_criteria
, outer_headers
.ip_protocol
);
715 MLX5_SET(fte_match_param
, spec
->match_value
, outer_headers
.ip_protocol
, proto
);
718 ipv
= mlx5e_etype_to_ipv(etype
);
719 if (match_ipv_outer
&& ipv
) {
720 spec
->match_criteria_enable
= MLX5_MATCH_OUTER_HEADERS
;
721 MLX5_SET_TO_ONES(fte_match_param
, spec
->match_criteria
, outer_headers
.ip_version
);
722 MLX5_SET(fte_match_param
, spec
->match_value
, outer_headers
.ip_version
, ipv
);
724 spec
->match_criteria_enable
= MLX5_MATCH_OUTER_HEADERS
;
725 MLX5_SET_TO_ONES(fte_match_param
, spec
->match_criteria
, outer_headers
.ethertype
);
726 MLX5_SET(fte_match_param
, spec
->match_value
, outer_headers
.ethertype
, etype
);
729 rule
= mlx5_add_flow_rules(ft
, spec
, &flow_act
, dest
, 1);
732 netdev_err(priv
->netdev
, "%s: add rule failed\n", __func__
);
736 return err
? ERR_PTR(err
) : rule
;
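/*
 * The fallback above matters on older devices: when outer_ip_version is
 * not advertised in the ft_field_support capability, the rule matches on
 * outer_headers.ethertype (0x0800 for IPv4, 0x86DD for IPv6) instead,
 * which classifies the same traffic but costs an exact 16-bit ethertype
 * match rather than the compact ip_version field.
 */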
static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
{
	struct mlx5_flow_destination dest;
	struct mlx5e_ttc_table *ttc;
	struct mlx5_flow_handle **rules;
	struct mlx5_flow_table *ft;
	int tt;
	int err;

	ttc = &priv->fs.ttc;
	ft = ttc->ft.t;
	rules = ttc->rules;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
		if (tt == MLX5E_TT_ANY)
			dest.tir_num = priv->direct_tir[0].tirn;
		else
			dest.tir_num = priv->indir_tir[tt].tirn;
		rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
						    ttc_rules[tt].etype,
						    ttc_rules[tt].proto);
		if (IS_ERR(rules[tt]))
			goto del_rules;
	}

	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
		return 0;

	rules     = ttc->tunnel_rules;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft   = priv->fs.inner_ttc.ft.t;
	for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) {
		rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
						    ttc_tunnel_rules[tt].etype,
						    ttc_tunnel_rules[tt].proto);
		if (IS_ERR(rules[tt]))
			goto del_rules;
	}

	return 0;

del_rules:
	err = PTR_ERR(rules[tt]);
	rules[tt] = NULL;
	mlx5e_cleanup_ttc_rules(ttc);
	return err;
}
#define MLX5E_TTC_NUM_GROUPS	3
#define MLX5E_TTC_GROUP1_SIZE	(BIT(3) + MLX5E_NUM_TUNNEL_TT)
#define MLX5E_TTC_GROUP2_SIZE	 BIT(1)
#define MLX5E_TTC_GROUP3_SIZE	 BIT(0)
#define MLX5E_TTC_TABLE_SIZE	(MLX5E_TTC_GROUP1_SIZE +\
				 MLX5E_TTC_GROUP2_SIZE +\
				 MLX5E_TTC_GROUP3_SIZE)

#define MLX5E_INNER_TTC_NUM_GROUPS	3
#define MLX5E_INNER_TTC_GROUP1_SIZE	BIT(3)
#define MLX5E_INNER_TTC_GROUP2_SIZE	BIT(1)
#define MLX5E_INNER_TTC_GROUP3_SIZE	BIT(0)
#define MLX5E_INNER_TTC_TABLE_SIZE	(MLX5E_INNER_TTC_GROUP1_SIZE +\
					 MLX5E_INNER_TTC_GROUP2_SIZE +\
					 MLX5E_INNER_TTC_GROUP3_SIZE)
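/*
 * Size arithmetic for the tables above: the outer TTC L4 group holds
 * BIT(3) == 8 protocol/IP-version rules plus MLX5E_NUM_TUNNEL_TT tunnel
 * steering rules, group 2 holds BIT(1) == 2 IP-version-only rules, and
 * group 3 the single catch-all. With 2 tunnel types that comes to
 * 8 + 2 + 2 + 1 = 13 flow table entries; the inner TTC is the same
 * layout minus the tunnel rules.
 */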
#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)

static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc,
					 bool use_ipv)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5e_flow_table *ft = &ttc->ft;
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_TTC_NUM_GROUPS,
			sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g)
		return -ENOMEM;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		kfree(ft->g);
		return -ENOMEM;
	}

	/* L4 Group */
	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	if (use_ipv)
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_version);
	else
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_TTC_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* L3 Group */
	MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_TTC_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* Any Group */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_TTC_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	kvfree(in);

	return err;
}
static struct mlx5_flow_handle *
mlx5e_generate_inner_ttc_rule(struct mlx5e_priv *priv,
			      struct mlx5_flow_table *ft,
			      struct mlx5_flow_destination *dest,
			      u16 etype, u8 proto)
{
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 ipv;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	ipv = mlx5e_etype_to_ipv(etype);
	if (etype && ipv) {
		spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_version);
		MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_version, ipv);
	}

	if (proto) {
		spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_protocol);
		MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_protocol, proto);
	}

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
	}

	kvfree(spec);
	return err ? ERR_PTR(err) : rule;
}
static int mlx5e_generate_inner_ttc_table_rules(struct mlx5e_priv *priv)
{
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle **rules;
	struct mlx5e_ttc_table *ttc;
	struct mlx5_flow_table *ft;
	int err;
	int tt;

	ttc = &priv->fs.inner_ttc;
	ft = ttc->ft.t;
	rules = ttc->rules;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
		if (tt == MLX5E_TT_ANY)
			dest.tir_num = priv->direct_tir[0].tirn;
		else
			dest.tir_num = priv->inner_indir_tir[tt].tirn;

		rules[tt] = mlx5e_generate_inner_ttc_rule(priv, ft, &dest,
							  ttc_rules[tt].etype,
							  ttc_rules[tt].proto);
		if (IS_ERR(rules[tt]))
			goto del_rules;
	}

	return 0;

del_rules:
	err = PTR_ERR(rules[tt]);
	rules[tt] = NULL;
	mlx5e_cleanup_ttc_rules(ttc);
	return err;
}
static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5e_flow_table *ft = &ttc->ft;
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_INNER_TTC_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g)
		return -ENOMEM;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		kfree(ft->g);
		return -ENOMEM;
	}

	/* L4 Group */
	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_version);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_TTC_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* L3 Group */
	MLX5_SET(fte_match_param, mc, inner_headers.ip_protocol, 0);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_TTC_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* Any Group */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_TTC_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	kvfree(in);

	return err;
}
static int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
{
	struct mlx5e_ttc_table *ttc = &priv->fs.inner_ttc;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5e_flow_table *ft = &ttc->ft;
	int err;

	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
		return 0;

	ft_attr.max_fte = MLX5E_INNER_TTC_TABLE_SIZE;
	ft_attr.level   = MLX5E_INNER_TTC_FT_LEVEL;
	ft_attr.prio    = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = mlx5e_create_inner_ttc_table_groups(ttc);
	if (err)
		goto err;

	err = mlx5e_generate_inner_ttc_table_rules(priv);
	if (err)
		goto err;

	return 0;

err:
	mlx5e_destroy_flow_table(ft);
	return err;
}
static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
{
	struct mlx5e_ttc_table *ttc = &priv->fs.inner_ttc;

	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
		return;

	mlx5e_cleanup_ttc_rules(ttc);
	mlx5e_destroy_flow_table(&ttc->ft);
}
void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
{
	struct mlx5e_ttc_table *ttc = &priv->fs.ttc;

	mlx5e_cleanup_ttc_rules(ttc);
	mlx5e_destroy_flow_table(&ttc->ft);
}
int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
{
	bool match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version);
	struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5e_flow_table *ft = &ttc->ft;
	int err;

	ft_attr.max_fte = MLX5E_TTC_TABLE_SIZE;
	ft_attr.level = MLX5E_TTC_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = mlx5e_create_ttc_table_groups(ttc, match_ipv_outer);
	if (err)
		goto err;

	err = mlx5e_generate_ttc_table_rules(priv);
	if (err)
		goto err;

	return 0;

err:
	mlx5e_destroy_flow_table(ft);
	return err;
}
static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
				   struct mlx5e_l2_rule *ai)
{
	if (!IS_ERR_OR_NULL(ai->rule)) {
		mlx5_del_flow_rules(ai->rule);
		ai->rule = NULL;
	}
}
static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
				  struct mlx5e_l2_rule *ai, int type)
{
	struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
	struct mlx5_flow_destination dest;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 *mc_dmac;
	u8 *mv_dmac;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			       outer_headers.dmac_47_16);
	mv_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			       outer_headers.dmac_47_16);

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fs.ttc.ft.t;

	switch (type) {
	case MLX5E_FULLMATCH:
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		eth_broadcast_addr(mc_dmac);
		ether_addr_copy(mv_dmac, ai->addr);
		break;

	case MLX5E_ALLMULTI:
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		mc_dmac[0] = 0x01;	/* match only the multicast bit */
		mv_dmac[0] = 0x01;
		break;
	}

	ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(ai->rule)) {
		netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
			   __func__, ai->addr);
		err = PTR_ERR(ai->rule);
		ai->rule = NULL;
	}

	kvfree(spec);

	return err;
}
#define MLX5E_NUM_L2_GROUPS	   3
#define MLX5E_L2_GROUP1_SIZE	   BIT(0)
#define MLX5E_L2_GROUP2_SIZE	   BIT(15)
#define MLX5E_L2_GROUP3_SIZE	   BIT(0)
#define MLX5E_L2_TABLE_SIZE	   (MLX5E_L2_GROUP1_SIZE +\
				    MLX5E_L2_GROUP2_SIZE +\
				    MLX5E_L2_GROUP3_SIZE)
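/*
 * The group sizes above mirror the three match styles in
 * mlx5e_add_l2_flow_rule(): one promiscuous catch-all entry, BIT(15) ==
 * 32768 full DMAC-match entries for unicast/broadcast addresses, and one
 * all-multicast entry, giving an L2 table of 32770 entries in total.
 */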
static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5e_flow_table *ft = &l2_table->ft;
	int ix = 0;
	u8 *mc_dmac;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g)
		return -ENOMEM;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		kfree(ft->g);
		return -ENOMEM;
	}

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
			       outer_headers.dmac_47_16);
	/* Flow Group for promiscuous */
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Flow Group for full match */
	eth_broadcast_addr(mc_dmac);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Flow Group for allmulti */
	eth_zero_addr(mc_dmac);
	mc_dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	kvfree(in);
	return 0;

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);
	kvfree(in);

	return err;
}
static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fs.l2.ft);
}
static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_table *l2_table = &priv->fs.l2;
	struct mlx5e_flow_table *ft = &l2_table->ft;
	struct mlx5_flow_table_attr ft_attr = {};
	int err;

	ft->num_groups = 0;

	ft_attr.max_fte = MLX5E_L2_TABLE_SIZE;
	ft_attr.level = MLX5E_L2_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = mlx5e_create_l2_table_groups(l2_table);
	if (err)
		goto err_destroy_flow_table;

	return 0;

err_destroy_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return err;
}
#define MLX5E_NUM_VLAN_GROUPS	3
#define MLX5E_VLAN_GROUP0_SIZE	BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE	BIT(1)
#define MLX5E_VLAN_GROUP2_SIZE	BIT(0)
#define MLX5E_VLAN_TABLE_SIZE	(MLX5E_VLAN_GROUP0_SIZE +\
				 MLX5E_VLAN_GROUP1_SIZE +\
				 MLX5E_VLAN_GROUP2_SIZE)
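/*
 * VLAN table arithmetic: group 0 (cvlan_tag + first_vid) holds BIT(12)
 * == 4096 per-VID rules, group 1 (cvlan_tag only) holds the untagged and
 * any-C-tag rules, and group 2 (svlan_tag) the any-S-tag rule, for a
 * table of 4096 + 2 + 1 = 4099 entries.
 */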
static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
					    int inlen)
{
	int err;
	int ix = 0;
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return 0;

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return err;
}
static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	err = __mlx5e_create_vlan_table_groups(ft, in, inlen);

	kvfree(in);
	return err;
}
static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fs.vlan.ft;
	struct mlx5_flow_table_attr ft_attr = {};
	int err;

	ft->num_groups = 0;

	ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE;
	ft_attr.level = MLX5E_VLAN_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}
	ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_vlan_table;
	}

	err = mlx5e_create_vlan_table_groups(ft);
	if (err)
		goto err_free_g;

	mlx5e_add_vlan_rules(priv);

	return 0;

err_free_g:
	kfree(ft->g);
err_destroy_vlan_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return err;
}
static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rules(priv);
	mlx5e_destroy_flow_table(&priv->fs.vlan.ft);
}
int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
{
	int err;

	priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
					      MLX5_FLOW_NAMESPACE_KERNEL);

	if (!priv->fs.ns)
		return -EOPNOTSUPP;

	err = mlx5e_arfs_create_tables(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
			   err);
		priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
	}

	err = mlx5e_create_inner_ttc_table(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
			   err);
		goto err_destroy_arfs_tables;
	}

	err = mlx5e_create_ttc_table(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
			   err);
		goto err_destroy_inner_ttc_table;
	}

	err = mlx5e_create_l2_table(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create l2 table, err=%d\n",
			   err);
		goto err_destroy_ttc_table;
	}

	err = mlx5e_create_vlan_table(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create vlan table, err=%d\n",
			   err);
		goto err_destroy_l2_table;
	}

	mlx5e_ethtool_init_steering(priv);

	return 0;

err_destroy_l2_table:
	mlx5e_destroy_l2_table(priv);
err_destroy_ttc_table:
	mlx5e_destroy_ttc_table(priv);
err_destroy_inner_ttc_table:
	mlx5e_destroy_inner_ttc_table(priv);
err_destroy_arfs_tables:
	mlx5e_arfs_destroy_tables(priv);

	return err;
}
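/*
 * Resulting RX classification chain (simplified sketch): VLAN table ->
 * L2 (DMAC) table -> TTC -> inner TTC / aRFS -> TIR. Creation above runs
 * in the opposite order because each table's rules use the next table as
 * their destination, and the error unwind walks the same chain back down.
 */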
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
{
	mlx5e_destroy_vlan_table(priv);
	mlx5e_destroy_l2_table(priv);
	mlx5e_destroy_ttc_table(priv);
	mlx5e_destroy_inner_ttc_table(priv);
	mlx5e_arfs_destroy_tables(priv);
	mlx5e_ethtool_cleanup_steering(priv);
}