/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
#include "en.h"
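
/* Receive flow steering for the mlx5e netdev: packets are classified
 * through a chain of flow tables (VLAN -> L2 -> TTC -> inner TTC) that
 * ultimately steers traffic to the TIRs feeding the RX queues.
 */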
static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
                                  struct mlx5e_l2_rule *ai, int type);
static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
                                   struct mlx5e_l2_rule *ai);
enum {
        MLX5E_ACTION_NONE = 0,
        MLX5E_ACTION_ADD  = 1,
        MLX5E_ACTION_DEL  = 2,
};
struct mlx5e_l2_hash_node {
        struct hlist_node    hlist;
        u8                   action;
        struct mlx5e_l2_rule ai;
        bool                 mpfs;
};
static inline int mlx5e_hash_l2(u8 *addr)
{
        /* hash on the last octet of the MAC address */
        return addr[5];
}
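
/* Cache an L2 address in the per-list hash table.  If the address is
 * already cached, just clear any pending action; otherwise allocate a
 * node and mark it for addition on the next sync.
 */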
static void mlx5e_add_l2_to_hash(struct hlist_head *hash, u8 *addr)
{
        struct mlx5e_l2_hash_node *hn;
        int ix = mlx5e_hash_l2(addr);
        int found = 0;

        hlist_for_each_entry(hn, &hash[ix], hlist)
                if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
                        found = 1;
                        break;
                }

        if (found) {
                hn->action = MLX5E_ACTION_NONE;
                return;
        }

        hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
        if (!hn)
                return;

        ether_addr_copy(hn->ai.addr, addr);
        hn->action = MLX5E_ACTION_ADD;

        hlist_add_head(&hn->hlist, &hash[ix]);
}
static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn)
{
        hlist_del(&hn->hlist);
        kfree(hn);
}
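
/* Push the set of active C-VLANs into the NIC vport context so the
 * device can filter on them; the list is truncated to the device's
 * log_max_vlan_list capability.
 */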
static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
{
        struct net_device *ndev = priv->netdev;
        int max_list_size;
        int list_size;
        u16 *vlans;
        int vlan;
        int err;
        int i;

        list_size = 0;
        for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID)
                list_size++;

        max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);

        if (list_size > max_list_size) {
                netdev_warn(ndev,
                            "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
                            list_size, max_list_size);
                list_size = max_list_size;
        }

        vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
        if (!vlans)
                return -ENOMEM;

        i = 0;
        for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
                if (i >= list_size)
                        break;
                vlans[i++] = vlan;
        }

        err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
        if (err)
                netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n",
                           err);

        kfree(vlans);
        return err;
}
enum mlx5e_vlan_rule_type {
        MLX5E_VLAN_RULE_TYPE_UNTAGGED,
        MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
        MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
        MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID,
        MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID,
};
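
/* Install one VLAN steering rule.  Every variant matches on the outer
 * cvlan_tag/svlan_tag bits (plus first_vid for exact-VID rules) and
 * forwards matching packets to the L2 flow table.
 */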
static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
                                 enum mlx5e_vlan_rule_type rule_type,
                                 u16 vid, struct mlx5_flow_spec *spec)
{
        struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
        struct mlx5_flow_destination dest = {};
        struct mlx5_flow_handle **rule_p;
        MLX5_DECLARE_FLOW_ACT(flow_act);
        int err = 0;

        dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest.ft = priv->fs.l2.ft.t;

        spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

        switch (rule_type) {
        case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
                /* cvlan_tag enabled in match criteria and
                 * disabled in match value means both S & C tags
                 * don't exist (untagged of both)
                 */
                rule_p = &priv->fs.vlan.untagged_rule;
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                                 outer_headers.cvlan_tag);
                break;
        case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
                rule_p = &priv->fs.vlan.any_cvlan_rule;
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                                 outer_headers.cvlan_tag);
                MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
                break;
        case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
                rule_p = &priv->fs.vlan.any_svlan_rule;
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                                 outer_headers.svlan_tag);
                MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
                break;
        case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
                rule_p = &priv->fs.vlan.active_svlans_rule[vid];
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                                 outer_headers.svlan_tag);
                MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                                 outer_headers.first_vid);
                MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
                         vid);
                break;
        default: /* MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID */
                rule_p = &priv->fs.vlan.active_cvlans_rule[vid];
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                                 outer_headers.cvlan_tag);
                MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                                 outer_headers.first_vid);
                MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
                         vid);
                break;
        }

        *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);

        if (IS_ERR(*rule_p)) {
                err = PTR_ERR(*rule_p);
                *rule_p = NULL;
                netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
        }

        return err;
}
static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
                               enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
        struct mlx5_flow_spec *spec;
        int err = 0;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec)
                return -ENOMEM;

        if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID)
                mlx5e_vport_context_update_vlans(priv);

        err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec);

        kvfree(spec);

        return err;
}
static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
                                enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
        switch (rule_type) {
        case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
                if (priv->fs.vlan.untagged_rule) {
                        mlx5_del_flow_rules(priv->fs.vlan.untagged_rule);
                        priv->fs.vlan.untagged_rule = NULL;
                }
                break;
        case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
                if (priv->fs.vlan.any_cvlan_rule) {
                        mlx5_del_flow_rules(priv->fs.vlan.any_cvlan_rule);
                        priv->fs.vlan.any_cvlan_rule = NULL;
                }
                break;
        case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
                if (priv->fs.vlan.any_svlan_rule) {
                        mlx5_del_flow_rules(priv->fs.vlan.any_svlan_rule);
                        priv->fs.vlan.any_svlan_rule = NULL;
                }
                break;
        case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
                if (priv->fs.vlan.active_svlans_rule[vid]) {
                        mlx5_del_flow_rules(priv->fs.vlan.active_svlans_rule[vid]);
                        priv->fs.vlan.active_svlans_rule[vid] = NULL;
                }
                break;
        case MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID:
                if (priv->fs.vlan.active_cvlans_rule[vid]) {
                        mlx5_del_flow_rules(priv->fs.vlan.active_cvlans_rule[vid]);
                        priv->fs.vlan.active_cvlans_rule[vid] = NULL;
                }
                mlx5e_vport_context_update_vlans(priv);
                break;
        }
}
static void mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
{
        mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
        mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}
static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
{
        int err;

        err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
        if (err)
                return err;

        return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}
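
/* Toggling the C-VLAN filter swaps between exact-VID rules and a
 * catch-any-VID rule.  In promiscuous mode the any-VID rules are
 * owned by the RX-mode path instead, so they are left untouched here.
 */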
void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv)
{
        if (!priv->fs.vlan.cvlan_filter_disabled)
                return;

        priv->fs.vlan.cvlan_filter_disabled = false;
        if (priv->netdev->flags & IFF_PROMISC)
                return;
        mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}
void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv)
{
        if (priv->fs.vlan.cvlan_filter_disabled)
                return;

        priv->fs.vlan.cvlan_filter_disabled = true;
        if (priv->netdev->flags & IFF_PROMISC)
                return;
        mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}
static int mlx5e_vlan_rx_add_cvid(struct mlx5e_priv *priv, u16 vid)
{
        int err;

        set_bit(vid, priv->fs.vlan.active_cvlans);

        err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
        if (err)
                clear_bit(vid, priv->fs.vlan.active_cvlans);

        return err;
}
static int mlx5e_vlan_rx_add_svid(struct mlx5e_priv *priv, u16 vid)
{
        struct net_device *netdev = priv->netdev;
        int err;

        set_bit(vid, priv->fs.vlan.active_svlans);

        err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
        if (err) {
                clear_bit(vid, priv->fs.vlan.active_svlans);
                return err;
        }

        /* Need to fix some features.. */
        netdev_update_features(netdev);
        return err;
}
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        if (be16_to_cpu(proto) == ETH_P_8021Q)
                return mlx5e_vlan_rx_add_cvid(priv, vid);
        else if (be16_to_cpu(proto) == ETH_P_8021AD)
                return mlx5e_vlan_rx_add_svid(priv, vid);

        return -EOPNOTSUPP;
}
int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        if (be16_to_cpu(proto) == ETH_P_8021Q) {
                clear_bit(vid, priv->fs.vlan.active_cvlans);
                mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
        } else if (be16_to_cpu(proto) == ETH_P_8021AD) {
                clear_bit(vid, priv->fs.vlan.active_svlans);
                mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
                netdev_update_features(dev);
        }

        return 0;
}
static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
{
        int i;

        mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

        for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
                mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
        }

        for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID)
                mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);

        if (priv->fs.vlan.cvlan_filter_disabled &&
            !(priv->netdev->flags & IFF_PROMISC))
                mlx5e_add_any_vid_rules(priv);
}
static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
{
        int i;

        mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

        for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
                mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
        }

        for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID)
                mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);

        if (priv->fs.vlan.cvlan_filter_disabled &&
            !(priv->netdev->flags & IFF_PROMISC))
                mlx5e_del_any_vid_rules(priv);
}
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
        for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
                hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
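
/* Apply the pending action recorded on a cached L2 address: install or
 * remove the steering rule, and mirror unicast entries into the MPFS
 * (Multi Physical Function Switch) table shared between functions.
 */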
static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
                                    struct mlx5e_l2_hash_node *hn)
{
        u8 action = hn->action;
        u8 mac_addr[ETH_ALEN];
        int l2_err = 0;

        ether_addr_copy(mac_addr, hn->ai.addr);

        switch (action) {
        case MLX5E_ACTION_ADD:
                mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH);
                if (!is_multicast_ether_addr(mac_addr)) {
                        l2_err = mlx5_mpfs_add_mac(priv->mdev, mac_addr);
                        hn->mpfs = !l2_err;
                }
                hn->action = MLX5E_ACTION_NONE;
                break;

        case MLX5E_ACTION_DEL:
                if (!is_multicast_ether_addr(mac_addr) && hn->mpfs)
                        l2_err = mlx5_mpfs_del_mac(priv->mdev, mac_addr);
                mlx5e_del_l2_flow_rule(priv, &hn->ai);
                mlx5e_del_l2_from_hash(hn);
                break;
        }

        if (l2_err)
                netdev_warn(priv->netdev, "MPFS, failed to %s mac %pM, err(%d)\n",
                            action == MLX5E_ACTION_ADD ? "add" : "del", mac_addr, l2_err);
}
static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
{
        struct net_device *netdev = priv->netdev;
        struct netdev_hw_addr *ha;

        netif_addr_lock_bh(netdev);

        mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc,
                             priv->netdev->dev_addr);

        netdev_for_each_uc_addr(ha, netdev)
                mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr);

        netdev_for_each_mc_addr(ha, netdev)
                mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr);

        netif_addr_unlock_bh(netdev);
}
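
/* Flatten the UC or MC hash table into a contiguous MAC array for the
 * firmware, pushing our primary address (or the broadcast address for
 * the MC list) first and skipping duplicates of the primary address.
 */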
static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
                                  u8 addr_array[][ETH_ALEN], int size)
{
        bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
        struct net_device *ndev = priv->netdev;
        struct mlx5e_l2_hash_node *hn;
        struct hlist_head *addr_list;
        struct hlist_node *tmp;
        int i = 0;
        int hi;

        addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;

        if (is_uc) /* Make sure our own address is pushed first */
                ether_addr_copy(addr_array[i++], ndev->dev_addr);
        else if (priv->fs.l2.broadcast_enabled)
                ether_addr_copy(addr_array[i++], ndev->broadcast);

        mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
                if (ether_addr_equal(ndev->dev_addr, hn->ai.addr))
                        continue;
                if (i >= size)
                        break;
                ether_addr_copy(addr_array[i++], hn->ai.addr);
        }
}
static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
                                                 int list_type)
{
        bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
        struct mlx5e_l2_hash_node *hn;
        u8 (*addr_array)[ETH_ALEN] = NULL;
        struct hlist_head *addr_list;
        struct hlist_node *tmp;
        int max_size;
        int size;
        int err;
        int hi;

        size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0);
        max_size = is_uc ?
                1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
                1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);

        addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
        mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
                size++;

        if (size > max_size) {
                netdev_warn(priv->netdev,
                            "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
                            is_uc ? "UC" : "MC", size, max_size);
                size = max_size;
        }

        if (size) {
                addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
                if (!addr_array) {
                        err = -ENOMEM;
                        goto out;
                }
                mlx5e_fill_addr_array(priv, list_type, addr_array, size);
        }

        err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
out:
        if (err)
                netdev_err(priv->netdev,
                           "Failed to modify vport %s list err(%d)\n",
                           is_uc ? "UC" : "MC", err);
        kfree(addr_array);
}
static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
{
        struct mlx5e_l2_table *ea = &priv->fs.l2;

        mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
        mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
        mlx5_modify_nic_vport_promisc(priv->mdev, 0,
                                      ea->allmulti_enabled,
                                      ea->promisc_enabled);
}
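
/* Address synchronization is mark-and-sweep: every cached entry is
 * first marked for deletion, the netdev's current lists re-mark live
 * entries, and the apply pass executes whatever actions remain.
 */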
static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
{
        struct mlx5e_l2_hash_node *hn;
        struct hlist_node *tmp;
        int i;

        mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
                mlx5e_execute_l2_action(priv, hn);

        mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
                mlx5e_execute_l2_action(priv, hn);
}
static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
{
        struct mlx5e_l2_hash_node *hn;
        struct hlist_node *tmp;
        int i;

        mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
                hn->action = MLX5E_ACTION_DEL;
        mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
                hn->action = MLX5E_ACTION_DEL;

        if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
                mlx5e_sync_netdev_addr(priv);

        mlx5e_apply_netdev_addr(priv);
}
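
/* Deferred RX-mode worker: diff the desired promisc/allmulti/broadcast
 * state against what is currently programmed, apply only the
 * transitions, then resync the address lists and the vport context.
 */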
void mlx5e_set_rx_mode_work(struct work_struct *work)
{
        struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
                                               set_rx_mode_work);

        struct mlx5e_l2_table *ea = &priv->fs.l2;
        struct net_device *ndev = priv->netdev;

        bool rx_mode_enable   = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
        bool promisc_enabled   = rx_mode_enable && (ndev->flags & IFF_PROMISC);
        bool allmulti_enabled  = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
        bool broadcast_enabled = rx_mode_enable;

        bool enable_promisc    = !ea->promisc_enabled   &&  promisc_enabled;
        bool disable_promisc   =  ea->promisc_enabled   && !promisc_enabled;
        bool enable_allmulti   = !ea->allmulti_enabled  &&  allmulti_enabled;
        bool disable_allmulti  =  ea->allmulti_enabled  && !allmulti_enabled;
        bool enable_broadcast  = !ea->broadcast_enabled &&  broadcast_enabled;
        bool disable_broadcast =  ea->broadcast_enabled && !broadcast_enabled;

        if (enable_promisc) {
                if (!priv->channels.params.vlan_strip_disable)
                        netdev_warn_once(ndev,
                                         "S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n");
                mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC);
                if (!priv->fs.vlan.cvlan_filter_disabled)
                        mlx5e_add_any_vid_rules(priv);
        }
        if (enable_allmulti)
                mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
        if (enable_broadcast)
                mlx5e_add_l2_flow_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

        mlx5e_handle_netdev_addr(priv);

        if (disable_broadcast)
                mlx5e_del_l2_flow_rule(priv, &ea->broadcast);
        if (disable_allmulti)
                mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
        if (disable_promisc) {
                if (!priv->fs.vlan.cvlan_filter_disabled)
                        mlx5e_del_any_vid_rules(priv);
                mlx5e_del_l2_flow_rule(priv, &ea->promisc);
        }

        ea->promisc_enabled   = promisc_enabled;
        ea->allmulti_enabled  = allmulti_enabled;
        ea->broadcast_enabled = broadcast_enabled;

        mlx5e_vport_context_update(priv);
}
static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
{
        int i;

        for (i = ft->num_groups - 1; i >= 0; i--) {
                if (!IS_ERR_OR_NULL(ft->g[i]))
                        mlx5_destroy_flow_group(ft->g[i]);
                ft->g[i] = NULL;
        }
        ft->num_groups = 0;
}
void mlx5e_init_l2_addr(struct mlx5e_priv *priv)
{
        ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast);
}
void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
        mlx5e_destroy_groups(ft);
        kfree(ft->g);
        mlx5_destroy_flow_table(ft->t);
        ft->t = NULL;
}
static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc)
{
        int i;

        for (i = 0; i < MLX5E_NUM_TT; i++) {
                if (!IS_ERR_OR_NULL(ttc->rules[i])) {
                        mlx5_del_flow_rules(ttc->rules[i]);
                        ttc->rules[i] = NULL;
                }
        }

        for (i = 0; i < MLX5E_NUM_TUNNEL_TT; i++) {
                if (!IS_ERR_OR_NULL(ttc->tunnel_rules[i])) {
                        mlx5_del_flow_rules(ttc->tunnel_rules[i]);
                        ttc->tunnel_rules[i] = NULL;
                }
        }
}
struct mlx5e_etype_proto {
        u16 etype;
        u8  proto;
};
static struct mlx5e_etype_proto ttc_rules[] = {
        [MLX5E_TT_IPV4_TCP] = {
                .etype = ETH_P_IP,
                .proto = IPPROTO_TCP,
        },
        [MLX5E_TT_IPV6_TCP] = {
                .etype = ETH_P_IPV6,
                .proto = IPPROTO_TCP,
        },
        [MLX5E_TT_IPV4_UDP] = {
                .etype = ETH_P_IP,
                .proto = IPPROTO_UDP,
        },
        [MLX5E_TT_IPV6_UDP] = {
                .etype = ETH_P_IPV6,
                .proto = IPPROTO_UDP,
        },
        [MLX5E_TT_IPV4_IPSEC_AH] = {
                .etype = ETH_P_IP,
                .proto = IPPROTO_AH,
        },
        [MLX5E_TT_IPV6_IPSEC_AH] = {
                .etype = ETH_P_IPV6,
                .proto = IPPROTO_AH,
        },
        [MLX5E_TT_IPV4_IPSEC_ESP] = {
                .etype = ETH_P_IP,
                .proto = IPPROTO_ESP,
        },
        [MLX5E_TT_IPV6_IPSEC_ESP] = {
                .etype = ETH_P_IPV6,
                .proto = IPPROTO_ESP,
        },
        [MLX5E_TT_IPV4] = {
                .etype = ETH_P_IP,
                .proto = 0,
        },
        [MLX5E_TT_IPV6] = {
                .etype = ETH_P_IPV6,
                .proto = 0,
        },
        [MLX5E_TT_ANY] = {
                .etype = 0,
                .proto = 0,
        },
};
static struct mlx5e_etype_proto ttc_tunnel_rules[] = {
        [MLX5E_TT_IPV4_GRE] = {
                .etype = ETH_P_IP,
                .proto = IPPROTO_GRE,
        },
        [MLX5E_TT_IPV6_GRE] = {
                .etype = ETH_P_IPV6,
                .proto = IPPROTO_GRE,
        },
};
static u8 mlx5e_etype_to_ipv(u16 ethertype)
{
        if (ethertype == ETH_P_IP)
                return 4;

        if (ethertype == ETH_P_IPV6)
                return 6;

        return 0;
}
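
/* Build one traffic-type classification rule.  Devices that support it
 * match the parsed outer ip_version directly; otherwise fall back to
 * matching the ethertype field.
 */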
static struct mlx5_flow_handle *
mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
                        struct mlx5_flow_table *ft,
                        struct mlx5_flow_destination *dest,
                        u16 etype, u8 proto)
{
        int match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version);
        MLX5_DECLARE_FLOW_ACT(flow_act);
        struct mlx5_flow_handle *rule;
        struct mlx5_flow_spec *spec;
        int err = 0;
        u8 ipv;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec)
                return ERR_PTR(-ENOMEM);

        if (proto) {
                spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
                MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, proto);
        }

        ipv = mlx5e_etype_to_ipv(etype);
        if (match_ipv_outer && ipv) {
                spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
                MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, ipv);
        } else if (etype) {
                spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
                MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
        }

        rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
        if (IS_ERR(rule)) {
                err = PTR_ERR(rule);
                netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
        }

        kvfree(spec);
        return err ? ERR_PTR(err) : rule;
}
static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
{
        struct mlx5_flow_destination dest = {};
        struct mlx5e_ttc_table *ttc;
        struct mlx5_flow_handle **rules;
        struct mlx5_flow_table *ft;
        int tt;
        int err;

        ttc = &priv->fs.ttc;
        ft = ttc->ft.t;
        rules = ttc->rules;

        dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
        for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
                if (tt == MLX5E_TT_ANY)
                        dest.tir_num = priv->direct_tir[0].tirn;
                else
                        dest.tir_num = priv->indir_tir[tt].tirn;
                rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
                                                    ttc_rules[tt].etype,
                                                    ttc_rules[tt].proto);
                if (IS_ERR(rules[tt]))
                        goto del_rules;
        }

        if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
                return 0;

        rules     = ttc->tunnel_rules;
        dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest.ft   = priv->fs.inner_ttc.ft.t;
        for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) {
                rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
                                                    ttc_tunnel_rules[tt].etype,
                                                    ttc_tunnel_rules[tt].proto);
                if (IS_ERR(rules[tt]))
                        goto del_rules;
        }

        return 0;

del_rules:
        err = PTR_ERR(rules[tt]);
        rules[tt] = NULL;
        mlx5e_cleanup_ttc_rules(ttc);
        return err;
}
#define MLX5E_TTC_NUM_GROUPS           3
#define MLX5E_TTC_GROUP1_SIZE          (BIT(3) + MLX5E_NUM_TUNNEL_TT)
#define MLX5E_TTC_GROUP2_SIZE          BIT(1)
#define MLX5E_TTC_GROUP3_SIZE          BIT(0)
#define MLX5E_TTC_TABLE_SIZE           (MLX5E_TTC_GROUP1_SIZE +\
                                        MLX5E_TTC_GROUP2_SIZE +\
                                        MLX5E_TTC_GROUP3_SIZE)
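
/* Group 1 holds the eight L4 rules (TCP/UDP/AH/ESP over v4/v6) plus the
 * tunnel rules, group 2 the two L3-only rules, group 3 the catch-all.
 */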
#define MLX5E_INNER_TTC_NUM_GROUPS     3
#define MLX5E_INNER_TTC_GROUP1_SIZE    BIT(3)
#define MLX5E_INNER_TTC_GROUP2_SIZE    BIT(1)
#define MLX5E_INNER_TTC_GROUP3_SIZE    BIT(0)
#define MLX5E_INNER_TTC_TABLE_SIZE     (MLX5E_INNER_TTC_GROUP1_SIZE +\
                                        MLX5E_INNER_TTC_GROUP2_SIZE +\
                                        MLX5E_INNER_TTC_GROUP3_SIZE)
static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc,
                                         bool use_ipv)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5e_flow_table *ft = &ttc->ft;
        int ix = 0;
        u32 *in;
        int err;
        u8 *mc;

        ft->g = kcalloc(MLX5E_TTC_NUM_GROUPS,
                        sizeof(*ft->g), GFP_KERNEL);
        if (!ft->g)
                return -ENOMEM;
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in) {
                kfree(ft->g);
                return -ENOMEM;
        }

        /* L4 Group */
        mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
        if (use_ipv)
                MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_version);
        else
                MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_TTC_GROUP1_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err;
        ft->num_groups++;

        /* L3 Group */
        MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_TTC_GROUP2_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err;
        ft->num_groups++;

        /* Any Group */
        memset(in, 0, inlen);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_TTC_GROUP3_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err;
        ft->num_groups++;

        kvfree(in);
        return 0;

err:
        err = PTR_ERR(ft->g[ft->num_groups]);
        ft->g[ft->num_groups] = NULL;
        kvfree(in);

        return err;
}
static struct mlx5_flow_handle *
mlx5e_generate_inner_ttc_rule(struct mlx5e_priv *priv,
                              struct mlx5_flow_table *ft,
                              struct mlx5_flow_destination *dest,
                              u16 etype, u8 proto)
{
        MLX5_DECLARE_FLOW_ACT(flow_act);
        struct mlx5_flow_handle *rule;
        struct mlx5_flow_spec *spec;
        int err = 0;
        u8 ipv;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec)
                return ERR_PTR(-ENOMEM);

        ipv = mlx5e_etype_to_ipv(etype);
        if (etype && ipv) {
                spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_version);
                MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_version, ipv);
        }

        if (proto) {
                spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_protocol);
                MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_protocol, proto);
        }

        rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
        if (IS_ERR(rule)) {
                err = PTR_ERR(rule);
                netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
        }

        kvfree(spec);
        return err ? ERR_PTR(err) : rule;
}
static int mlx5e_generate_inner_ttc_table_rules(struct mlx5e_priv *priv)
{
        struct mlx5_flow_destination dest = {};
        struct mlx5_flow_handle **rules;
        struct mlx5e_ttc_table *ttc;
        struct mlx5_flow_table *ft;
        int err;
        int tt;

        ttc = &priv->fs.inner_ttc;
        ft = ttc->ft.t;
        rules = ttc->rules;

        dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
        for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
                if (tt == MLX5E_TT_ANY)
                        dest.tir_num = priv->direct_tir[0].tirn;
                else
                        dest.tir_num = priv->inner_indir_tir[tt].tirn;

                rules[tt] = mlx5e_generate_inner_ttc_rule(priv, ft, &dest,
                                                          ttc_rules[tt].etype,
                                                          ttc_rules[tt].proto);
                if (IS_ERR(rules[tt]))
                        goto del_rules;
        }

        return 0;

del_rules:
        err = PTR_ERR(rules[tt]);
        rules[tt] = NULL;
        mlx5e_cleanup_ttc_rules(ttc);
        return err;
}
static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5e_flow_table *ft = &ttc->ft;
        int ix = 0;
        u32 *in;
        int err;
        u8 *mc;

        ft->g = kcalloc(MLX5E_INNER_TTC_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
        if (!ft->g)
                return -ENOMEM;
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in) {
                kfree(ft->g);
                return -ENOMEM;
        }

        /* L4 Group */
        mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
        MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
        MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_version);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_INNER_TTC_GROUP1_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err;
        ft->num_groups++;

        /* L3 Group */
        MLX5_SET(fte_match_param, mc, inner_headers.ip_protocol, 0);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_INNER_TTC_GROUP2_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err;
        ft->num_groups++;

        /* Any Group */
        memset(in, 0, inlen);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_INNER_TTC_GROUP3_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err;
        ft->num_groups++;

        kvfree(in);
        return 0;

err:
        err = PTR_ERR(ft->g[ft->num_groups]);
        ft->g[ft->num_groups] = NULL;
        kvfree(in);

        return err;
}
int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
{
        struct mlx5e_ttc_table *ttc = &priv->fs.inner_ttc;
        struct mlx5_flow_table_attr ft_attr = {};
        struct mlx5e_flow_table *ft = &ttc->ft;
        int err;

        if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
                return 0;

        ft_attr.max_fte = MLX5E_INNER_TTC_TABLE_SIZE;
        ft_attr.level   = MLX5E_INNER_TTC_FT_LEVEL;
        ft_attr.prio    = MLX5E_NIC_PRIO;

        ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
        if (IS_ERR(ft->t)) {
                err = PTR_ERR(ft->t);
                ft->t = NULL;
                return err;
        }

        err = mlx5e_create_inner_ttc_table_groups(ttc);
        if (err)
                goto err;

        err = mlx5e_generate_inner_ttc_table_rules(priv);
        if (err)
                goto err;

        return 0;

err:
        mlx5e_destroy_flow_table(ft);
        return err;
}
void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
{
        struct mlx5e_ttc_table *ttc = &priv->fs.inner_ttc;

        if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
                return;

        mlx5e_cleanup_ttc_rules(ttc);
        mlx5e_destroy_flow_table(&ttc->ft);
}
void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
{
        struct mlx5e_ttc_table *ttc = &priv->fs.ttc;

        mlx5e_cleanup_ttc_rules(ttc);
        mlx5e_destroy_flow_table(&ttc->ft);
}
int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
{
        bool match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version);
        struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
        struct mlx5_flow_table_attr ft_attr = {};
        struct mlx5e_flow_table *ft = &ttc->ft;
        int err;

        ft_attr.max_fte = MLX5E_TTC_TABLE_SIZE;
        ft_attr.level = MLX5E_TTC_FT_LEVEL;
        ft_attr.prio = MLX5E_NIC_PRIO;

        ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
        if (IS_ERR(ft->t)) {
                err = PTR_ERR(ft->t);
                ft->t = NULL;
                return err;
        }

        err = mlx5e_create_ttc_table_groups(ttc, match_ipv_outer);
        if (err)
                goto err;

        err = mlx5e_generate_ttc_table_rules(priv);
        if (err)
                goto err;

        return 0;

err:
        mlx5e_destroy_flow_table(ft);
        return err;
}
static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
                                   struct mlx5e_l2_rule *ai)
{
        if (!IS_ERR_OR_NULL(ai->rule)) {
                mlx5_del_flow_rules(ai->rule);
                ai->rule = NULL;
        }
}
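
/* Install a DMAC steering rule in the L2 table.  MLX5E_FULLMATCH
 * matches the complete destination MAC, MLX5E_ALLMULTI matches only
 * the multicast bit, and MLX5E_PROMISC matches everything.
 */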
static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
                                  struct mlx5e_l2_rule *ai, int type)
{
        struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
        struct mlx5_flow_destination dest = {};
        MLX5_DECLARE_FLOW_ACT(flow_act);
        struct mlx5_flow_spec *spec;
        int err = 0;
        u8 *mc_dmac;
        u8 *mv_dmac;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec)
                return -ENOMEM;

        mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                               outer_headers.dmac_47_16);
        mv_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                               outer_headers.dmac_47_16);

        dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest.ft = priv->fs.ttc.ft.t;

        switch (type) {
        case MLX5E_FULLMATCH:
                spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
                eth_broadcast_addr(mc_dmac);
                ether_addr_copy(mv_dmac, ai->addr);
                break;

        case MLX5E_ALLMULTI:
                spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
                mc_dmac[0] = 0x01;
                mv_dmac[0] = 0x01;
                break;

        case MLX5E_PROMISC:
                break;
        }

        ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
        if (IS_ERR(ai->rule)) {
                netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
                           __func__, mv_dmac);
                err = PTR_ERR(ai->rule);
                ai->rule = NULL;
        }

        kvfree(spec);

        return err;
}
#define MLX5E_NUM_L2_GROUPS        3
#define MLX5E_L2_GROUP1_SIZE       BIT(0)
#define MLX5E_L2_GROUP2_SIZE       BIT(15)
#define MLX5E_L2_GROUP3_SIZE       BIT(0)
#define MLX5E_L2_TABLE_SIZE        (MLX5E_L2_GROUP1_SIZE +\
                                    MLX5E_L2_GROUP2_SIZE +\
                                    MLX5E_L2_GROUP3_SIZE)
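
/* One promiscuous entry, up to 32K full-match MAC entries (BIT(15)),
 * and one allmulti entry.
 */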
static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5e_flow_table *ft = &l2_table->ft;
        int ix = 0;
        u8 *mc_dmac;
        u32 *in;
        int err;
        u8 *mc;

        ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL);
        if (!ft->g)
                return -ENOMEM;
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in) {
                kfree(ft->g);
                return -ENOMEM;
        }

        mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
        mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
                               outer_headers.dmac_47_16);
        /* Flow Group for promiscuous */
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_L2_GROUP1_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        /* Flow Group for full match */
        eth_broadcast_addr(mc_dmac);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_L2_GROUP2_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        /* Flow Group for allmulti */
        eth_zero_addr(mc_dmac);
        mc_dmac[0] = 0x01;
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_L2_GROUP3_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        kvfree(in);
        return 0;

err_destroy_groups:
        err = PTR_ERR(ft->g[ft->num_groups]);
        ft->g[ft->num_groups] = NULL;
        mlx5e_destroy_groups(ft);
        kvfree(in);
        return err;
}
static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
{
        mlx5e_destroy_flow_table(&priv->fs.l2.ft);
}
static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
{
        struct mlx5e_l2_table *l2_table = &priv->fs.l2;
        struct mlx5e_flow_table *ft = &l2_table->ft;
        struct mlx5_flow_table_attr ft_attr = {};
        int err;

        ft->num_groups = 0;

        ft_attr.max_fte = MLX5E_L2_TABLE_SIZE;
        ft_attr.level = MLX5E_L2_FT_LEVEL;
        ft_attr.prio = MLX5E_NIC_PRIO;

        ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
        if (IS_ERR(ft->t)) {
                err = PTR_ERR(ft->t);
                ft->t = NULL;
                return err;
        }

        err = mlx5e_create_l2_table_groups(l2_table);
        if (err)
                goto err_destroy_flow_table;

        return 0;

err_destroy_flow_table:
        mlx5_destroy_flow_table(ft->t);
        ft->t = NULL;

        return err;
}
#define MLX5E_NUM_VLAN_GROUPS      4
#define MLX5E_VLAN_GROUP0_SIZE     BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE     BIT(12)
#define MLX5E_VLAN_GROUP2_SIZE     BIT(1)
#define MLX5E_VLAN_GROUP3_SIZE     BIT(0)
#define MLX5E_VLAN_TABLE_SIZE      (MLX5E_VLAN_GROUP0_SIZE +\
                                    MLX5E_VLAN_GROUP1_SIZE +\
                                    MLX5E_VLAN_GROUP2_SIZE +\
                                    MLX5E_VLAN_GROUP3_SIZE)
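
/* Exact C-VID and S-VID groups (4096 entries each), a cvlan_tag-only
 * group for the untagged and any-C-tag rules, and an svlan_tag-only
 * group for the any-S-tag rule.
 */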
static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
                                            int inlen)
{
        int err;
        int ix = 0;
        u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

        memset(in, 0, inlen);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_VLAN_GROUP0_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        memset(in, 0, inlen);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_VLAN_GROUP1_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        memset(in, 0, inlen);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_VLAN_GROUP2_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        memset(in, 0, inlen);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_VLAN_GROUP3_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        return 0;

err_destroy_groups:
        err = PTR_ERR(ft->g[ft->num_groups]);
        ft->g[ft->num_groups] = NULL;
        mlx5e_destroy_groups(ft);

        return err;
}
static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
{
        u32 *in;
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        int err;

        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        err = __mlx5e_create_vlan_table_groups(ft, in, inlen);

        kvfree(in);
        return err;
}
static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
{
        struct mlx5e_flow_table *ft = &priv->fs.vlan.ft;
        struct mlx5_flow_table_attr ft_attr = {};
        int err;

        ft->num_groups = 0;

        ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE;
        ft_attr.level = MLX5E_VLAN_FT_LEVEL;
        ft_attr.prio = MLX5E_NIC_PRIO;

        ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);

        if (IS_ERR(ft->t)) {
                err = PTR_ERR(ft->t);
                ft->t = NULL;
                return err;
        }
        ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
        if (!ft->g) {
                err = -ENOMEM;
                goto err_destroy_vlan_table;
        }

        err = mlx5e_create_vlan_table_groups(ft);
        if (err)
                goto err_free_g;

        mlx5e_add_vlan_rules(priv);

        return 0;

err_free_g:
        kfree(ft->g);
err_destroy_vlan_table:
        mlx5_destroy_flow_table(ft->t);
        ft->t = NULL;

        return err;
}
static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
{
        mlx5e_del_vlan_rules(priv);
        mlx5e_destroy_flow_table(&priv->fs.vlan.ft);
}
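
/* Build the steering pipeline back to front: each table must exist
 * before the tables that point at it, so creation runs inner TTC ->
 * TTC -> L2 -> VLAN, and teardown runs in the opposite order.
 */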
int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
{
        int err;

        priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
                                              MLX5_FLOW_NAMESPACE_KERNEL);

        if (!priv->fs.ns)
                return -EOPNOTSUPP;

        err = mlx5e_arfs_create_tables(priv);
        if (err) {
                netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
                           err);
                priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
        }

        err = mlx5e_create_inner_ttc_table(priv);
        if (err) {
                netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
                           err);
                goto err_destroy_arfs_tables;
        }

        err = mlx5e_create_ttc_table(priv);
        if (err) {
                netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
                           err);
                goto err_destroy_inner_ttc_table;
        }

        err = mlx5e_create_l2_table(priv);
        if (err) {
                netdev_err(priv->netdev, "Failed to create l2 table, err=%d\n",
                           err);
                goto err_destroy_ttc_table;
        }

        err = mlx5e_create_vlan_table(priv);
        if (err) {
                netdev_err(priv->netdev, "Failed to create vlan table, err=%d\n",
                           err);
                goto err_destroy_l2_table;
        }

        mlx5e_ethtool_init_steering(priv);

        return 0;

err_destroy_l2_table:
        mlx5e_destroy_l2_table(priv);
err_destroy_ttc_table:
        mlx5e_destroy_ttc_table(priv);
err_destroy_inner_ttc_table:
        mlx5e_destroy_inner_ttc_table(priv);
err_destroy_arfs_tables:
        mlx5e_arfs_destroy_tables(priv);

        return err;
}
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
{
        mlx5e_destroy_vlan_table(priv);
        mlx5e_destroy_l2_table(priv);
        mlx5e_destroy_ttc_table(priv);
        mlx5e_destroy_inner_ttc_table(priv);
        mlx5e_arfs_destroy_tables(priv);
        mlx5e_ethtool_cleanup_steering(priv);
}