/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/list.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
#include "en.h"

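/*
 * NIC RX flow steering is built as a chain of flow tables:
 * VLAN table -> L2 (DMAC) table -> TTC (traffic type classifier) table.
 * VLAN rules forward matching packets to the L2 table, L2 rules forward
 * to the TTC table, and TTC rules steer packets to the RSS/direct TIRs.
 */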
static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
                                  struct mlx5e_l2_rule *ai, int type);
static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
                                   struct mlx5e_l2_rule *ai);

enum {
        MLX5E_VLAN_FT_LEVEL = 0,
        MLX5E_L2_FT_LEVEL,
        MLX5E_TTC_FT_LEVEL,
};

#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)

enum {
        MLX5E_ACTION_NONE = 0,
        MLX5E_ACTION_ADD,
        MLX5E_ACTION_DEL,
};

struct mlx5e_l2_hash_node {
        struct hlist_node    hlist;
        u8                   action;
        struct mlx5e_l2_rule ai;
};

static inline int mlx5e_hash_l2(u8 *addr)
{
        return addr[5];
}

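/*
 * netdev unicast/multicast addresses are mirrored into small hash tables
 * (indexed by mlx5e_hash_l2()); each node carries a pending action
 * (ADD/DEL/NONE) that is later translated into an L2 flow rule.
 */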
static void mlx5e_add_l2_to_hash(struct hlist_head *hash, u8 *addr)
{
        struct mlx5e_l2_hash_node *hn;
        int ix = mlx5e_hash_l2(addr);
        int found = 0;

        hlist_for_each_entry(hn, &hash[ix], hlist)
                if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
                        found = 1;
                        break;
                }

        if (found) {
                hn->action = MLX5E_ACTION_NONE;
                return;
        }

        hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
        if (!hn)
                return;

        ether_addr_copy(hn->ai.addr, addr);
        hn->action = MLX5E_ACTION_ADD;

        hlist_add_head(&hn->hlist, &hash[ix]);
}

static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn)
{
        hlist_del(&hn->hlist);
        kfree(hn);
}

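/*
 * Push the set of active VLANs to the NIC vport context. If the list is
 * larger than the firmware supports (log_max_vlan_list), it is truncated
 * and a warning is emitted.
 */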
static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
{
        struct net_device *ndev = priv->netdev;
        int max_list_size;
        int list_size;
        u16 *vlans;
        int vlan;
        int err;
        int i;

        list_size = 0;
        for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID)
                list_size++;

        max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);

        if (list_size > max_list_size) {
                netdev_warn(ndev,
                            "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
                            list_size, max_list_size);
                list_size = max_list_size;
        }

        vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
        if (!vlans)
                return -ENOMEM;

        i = 0;
        for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID) {
                if (i >= list_size)
                        break;
                vlans[i++] = vlan;
        }

        err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
        if (err)
                netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n",
                           err);

        kfree(vlans);
        return err;
}

enum mlx5e_vlan_rule_type {
        MLX5E_VLAN_RULE_TYPE_UNTAGGED,
        MLX5E_VLAN_RULE_TYPE_ANY_VID,
        MLX5E_VLAN_RULE_TYPE_MATCH_VID,
};

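/*
 * Install one VLAN steering rule: untagged traffic, any tagged traffic
 * (used while VLAN filtering is disabled or in promiscuous mode), or one
 * specific VID. All VLAN rules forward to the L2 flow table.
 */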
static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
                                 enum mlx5e_vlan_rule_type rule_type,
                                 u16 vid, u32 *mc, u32 *mv)
{
        struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
        struct mlx5_flow_destination dest;
        u8 match_criteria_enable = 0;
        struct mlx5_flow_rule **rule_p;
        int err = 0;

        dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest.ft = priv->fs.l2.ft.t;

        match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);

        switch (rule_type) {
        case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
                rule_p = &priv->fs.vlan.untagged_rule;
                break;
        case MLX5E_VLAN_RULE_TYPE_ANY_VID:
                rule_p = &priv->fs.vlan.any_vlan_rule;
                MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1);
                break;
        default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
                rule_p = &priv->fs.vlan.active_vlans_rule[vid];
                MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1);
                MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
                MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid);
                break;
        }

        *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
                                     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                                     MLX5_FS_DEFAULT_FLOW_TAG,
                                     &dest);

        if (IS_ERR(*rule_p)) {
                err = PTR_ERR(*rule_p);
                *rule_p = NULL;
                netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
        }

        return err;
}

static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
                               enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
        u32 *match_criteria;
        u32 *match_value;
        int err = 0;

        match_value    = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
        match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
        if (!match_value || !match_criteria) {
                netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
                err = -ENOMEM;
                goto add_vlan_rule_out;
        }

        if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_VID)
                mlx5e_vport_context_update_vlans(priv);

        err = __mlx5e_add_vlan_rule(priv, rule_type, vid, match_criteria,
                                    match_value);

add_vlan_rule_out:
        kvfree(match_criteria);
        kvfree(match_value);
        return err;
}

static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
                                enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
        switch (rule_type) {
        case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
                if (priv->fs.vlan.untagged_rule) {
                        mlx5_del_flow_rule(priv->fs.vlan.untagged_rule);
                        priv->fs.vlan.untagged_rule = NULL;
                }
                break;
        case MLX5E_VLAN_RULE_TYPE_ANY_VID:
                if (priv->fs.vlan.any_vlan_rule) {
                        mlx5_del_flow_rule(priv->fs.vlan.any_vlan_rule);
                        priv->fs.vlan.any_vlan_rule = NULL;
                }
                break;
        case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
                mlx5e_vport_context_update_vlans(priv);
                if (priv->fs.vlan.active_vlans_rule[vid]) {
                        mlx5_del_flow_rule(priv->fs.vlan.active_vlans_rule[vid]);
                        priv->fs.vlan.active_vlans_rule[vid] = NULL;
                }
                mlx5e_vport_context_update_vlans(priv);
                break;
        }
}

void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
{
        if (!priv->fs.vlan.filter_disabled)
                return;

        priv->fs.vlan.filter_disabled = false;
        if (priv->netdev->flags & IFF_PROMISC)
                return;
        mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
}

void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
{
        if (priv->fs.vlan.filter_disabled)
                return;

        priv->fs.vlan.filter_disabled = true;
        if (priv->netdev->flags & IFF_PROMISC)
                return;
        mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
}

int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
                          u16 vid)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        set_bit(vid, priv->fs.vlan.active_vlans);

        return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
}

int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
                           u16 vid)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        clear_bit(vid, priv->fs.vlan.active_vlans);

        mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);

        return 0;
}

#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
        for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
                hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)

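/* Apply the pending action recorded on a hash node as an L2 flow rule. */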
static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
                                    struct mlx5e_l2_hash_node *hn)
{
        switch (hn->action) {
        case MLX5E_ACTION_ADD:
                mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH);
                hn->action = MLX5E_ACTION_NONE;
                break;

        case MLX5E_ACTION_DEL:
                mlx5e_del_l2_flow_rule(priv, &hn->ai);
                mlx5e_del_l2_from_hash(hn);
                break;
        }
}

static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
{
        struct net_device *netdev = priv->netdev;
        struct netdev_hw_addr *ha;

        netif_addr_lock_bh(netdev);

        mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc,
                             priv->netdev->dev_addr);

        netdev_for_each_uc_addr(ha, netdev)
                mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr);

        netdev_for_each_mc_addr(ha, netdev)
                mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr);

        netif_addr_unlock_bh(netdev);
}

static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
                                  u8 addr_array[][ETH_ALEN], int size)
{
        bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
        struct net_device *ndev = priv->netdev;
        struct mlx5e_l2_hash_node *hn;
        struct hlist_head *addr_list;
        struct hlist_node *tmp;
        int i = 0;
        int hi;

        addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;

        if (is_uc) /* Make sure our own address is pushed first */
                ether_addr_copy(addr_array[i++], ndev->dev_addr);
        else if (priv->fs.l2.broadcast_enabled)
                ether_addr_copy(addr_array[i++], ndev->broadcast);

        mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
                if (ether_addr_equal(ndev->dev_addr, hn->ai.addr))
                        continue;
                if (i >= size)
                        break;
                ether_addr_copy(addr_array[i++], hn->ai.addr);
        }
}

static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
                                                 int list_type)
{
        bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
        struct mlx5e_l2_hash_node *hn;
        u8 (*addr_array)[ETH_ALEN] = NULL;
        struct hlist_head *addr_list;
        struct hlist_node *tmp;
        int max_size;
        int size;
        int err;
        int hi;

        size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0);
        max_size = is_uc ?
                1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
                1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);

        addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
        mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
                size++;

        if (size > max_size) {
                netdev_warn(priv->netdev,
                            "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
                            is_uc ? "UC" : "MC", size, max_size);
                size = max_size;
        }

        if (size) {
                addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
                if (!addr_array) {
                        err = -ENOMEM;
                        goto out;
                }
                mlx5e_fill_addr_array(priv, list_type, addr_array, size);
        }

        err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
out:
        if (err)
                netdev_err(priv->netdev,
                           "Failed to modify vport %s list err(%d)\n",
                           is_uc ? "UC" : "MC", err);
        kfree(addr_array);
}

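/*
 * Mirror the netdev UC/MC address lists and the promisc/allmulti flags
 * into the NIC vport context via the mlx5_modify_nic_vport_* commands.
 */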
static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
{
        struct mlx5e_l2_table *ea = &priv->fs.l2;

        mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
        mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
        mlx5_modify_nic_vport_promisc(priv->mdev, 0,
                                      ea->allmulti_enabled,
                                      ea->promisc_enabled);
}

static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
{
        struct mlx5e_l2_hash_node *hn;
        struct hlist_node *tmp;
        int i;

        mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
                mlx5e_execute_l2_action(priv, hn);

        mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
                mlx5e_execute_l2_action(priv, hn);
}

static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
{
        struct mlx5e_l2_hash_node *hn;
        struct hlist_node *tmp;
        int i;

        mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
                hn->action = MLX5E_ACTION_DEL;
        mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
                hn->action = MLX5E_ACTION_DEL;

        if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
                mlx5e_sync_netdev_addr(priv);

        mlx5e_apply_netdev_addr(priv);
}

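/*
 * Work handler for the netdev RX-mode update: compare the desired
 * promisc/allmulti/broadcast state with the currently programmed one,
 * add or remove the corresponding L2 (and any-VID VLAN) rules, resync
 * the address hash tables, and finally update the vport context.
 */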
void mlx5e_set_rx_mode_work(struct work_struct *work)
{
        struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
                                               set_rx_mode_work);

        struct mlx5e_l2_table *ea = &priv->fs.l2;
        struct net_device *ndev = priv->netdev;

        bool rx_mode_enable   = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
        bool promisc_enabled   = rx_mode_enable && (ndev->flags & IFF_PROMISC);
        bool allmulti_enabled  = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
        bool broadcast_enabled = rx_mode_enable;

        bool enable_promisc    = !ea->promisc_enabled   &&  promisc_enabled;
        bool disable_promisc   =  ea->promisc_enabled   && !promisc_enabled;
        bool enable_allmulti   = !ea->allmulti_enabled  &&  allmulti_enabled;
        bool disable_allmulti  =  ea->allmulti_enabled  && !allmulti_enabled;
        bool enable_broadcast  = !ea->broadcast_enabled &&  broadcast_enabled;
        bool disable_broadcast =  ea->broadcast_enabled && !broadcast_enabled;

        if (enable_promisc) {
                mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC);
                if (!priv->fs.vlan.filter_disabled)
                        mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
                                            0);
        }
        if (enable_allmulti)
                mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
        if (enable_broadcast)
                mlx5e_add_l2_flow_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

        mlx5e_handle_netdev_addr(priv);

        if (disable_broadcast)
                mlx5e_del_l2_flow_rule(priv, &ea->broadcast);
        if (disable_allmulti)
                mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
        if (disable_promisc) {
                if (!priv->fs.vlan.filter_disabled)
                        mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
                                            0);
                mlx5e_del_l2_flow_rule(priv, &ea->promisc);
        }

        ea->promisc_enabled   = promisc_enabled;
        ea->allmulti_enabled  = allmulti_enabled;
        ea->broadcast_enabled = broadcast_enabled;

        mlx5e_vport_context_update(priv);
}

static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
{
        int i;

        for (i = ft->num_groups - 1; i >= 0; i--) {
                if (!IS_ERR_OR_NULL(ft->g[i]))
                        mlx5_destroy_flow_group(ft->g[i]);
                ft->g[i] = NULL;
        }
        ft->num_groups = 0;
}

void mlx5e_init_l2_addr(struct mlx5e_priv *priv)
{
        ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast);
}

static void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
        mlx5e_destroy_groups(ft);
        kfree(ft->g);
        mlx5_destroy_flow_table(ft->t);
        ft->t = NULL;
}

static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc)
{
        int i;

        for (i = 0; i < MLX5E_NUM_TT; i++) {
                if (!IS_ERR_OR_NULL(ttc->rules[i])) {
                        mlx5_del_flow_rule(ttc->rules[i]);
                        ttc->rules[i] = NULL;
                }
        }
}

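/*
 * Traffic type classification: ethertype/IP-protocol pairs that define
 * each TTC rule. Entries with a zero protocol match any L4 protocol for
 * that ethertype; MLX5E_TT_ANY matches everything left.
 */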
static struct {
        u16 etype;
        u8 proto;
} ttc_rules[MLX5E_NUM_TT] = {
        [MLX5E_TT_IPV4_TCP] = {
                .etype = ETH_P_IP,
                .proto = IPPROTO_TCP,
        },
        [MLX5E_TT_IPV6_TCP] = {
                .etype = ETH_P_IPV6,
                .proto = IPPROTO_TCP,
        },
        [MLX5E_TT_IPV4_UDP] = {
                .etype = ETH_P_IP,
                .proto = IPPROTO_UDP,
        },
        [MLX5E_TT_IPV6_UDP] = {
                .etype = ETH_P_IPV6,
                .proto = IPPROTO_UDP,
        },
        [MLX5E_TT_IPV4_IPSEC_AH] = {
                .etype = ETH_P_IP,
                .proto = IPPROTO_AH,
        },
        [MLX5E_TT_IPV6_IPSEC_AH] = {
                .etype = ETH_P_IPV6,
                .proto = IPPROTO_AH,
        },
        [MLX5E_TT_IPV4_IPSEC_ESP] = {
                .etype = ETH_P_IP,
                .proto = IPPROTO_ESP,
        },
        [MLX5E_TT_IPV6_IPSEC_ESP] = {
                .etype = ETH_P_IPV6,
                .proto = IPPROTO_ESP,
        },
        [MLX5E_TT_IPV4] = {
                .etype = ETH_P_IP,
        },
        [MLX5E_TT_IPV6] = {
                .etype = ETH_P_IPV6,
        },
        [MLX5E_TT_ANY] = {
        },
};

static struct mlx5_flow_rule *mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
                                                      struct mlx5_flow_table *ft,
                                                      struct mlx5_flow_destination *dest,
                                                      u16 etype, u8 proto)
{
        struct mlx5_flow_rule *rule;
        u8 match_criteria_enable = 0;
        u32 *match_criteria;
        u32 *match_value;
        int err = 0;

        match_value    = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
        match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
        if (!match_value || !match_criteria) {
                netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
                err = -ENOMEM;
                goto out;
        }

        if (proto) {
                match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
                MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.ip_protocol);
                MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol, proto);
        }
        if (etype) {
                match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
                MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.ethertype);
                MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, etype);
        }

        rule = mlx5_add_flow_rule(ft, match_criteria_enable,
                                  match_criteria, match_value,
                                  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                                  MLX5_FS_DEFAULT_FLOW_TAG,
                                  dest);
        if (IS_ERR(rule)) {
                err = PTR_ERR(rule);
                netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
        }
out:
        kvfree(match_criteria);
        kvfree(match_value);
        return err ? ERR_PTR(err) : rule;
}

static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
{
        struct mlx5_flow_destination dest;
        struct mlx5e_ttc_table *ttc;
        struct mlx5_flow_rule **rules;
        struct mlx5_flow_table *ft;
        int tt;
        int err;

        ttc = &priv->fs.ttc;
        ft = ttc->ft.t;
        rules = ttc->rules;

        dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
        for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
                if (tt == MLX5E_TT_ANY)
                        dest.tir_num = priv->direct_tir[0].tirn;
                else
                        dest.tir_num = priv->indir_tirn[tt];
                rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
                                                    ttc_rules[tt].etype,
                                                    ttc_rules[tt].proto);
                if (IS_ERR(rules[tt]))
                        goto del_rules;
        }

        return 0;

del_rules:
        err = PTR_ERR(rules[tt]);
        rules[tt] = NULL;
        mlx5e_cleanup_ttc_rules(ttc);

        return err;
}

#define MLX5E_TTC_NUM_GROUPS	3
#define MLX5E_TTC_GROUP1_SIZE	BIT(3)
#define MLX5E_TTC_GROUP2_SIZE	BIT(1)
#define MLX5E_TTC_GROUP3_SIZE	BIT(0)
#define MLX5E_TTC_TABLE_SIZE	(MLX5E_TTC_GROUP1_SIZE +\
				 MLX5E_TTC_GROUP2_SIZE +\
				 MLX5E_TTC_GROUP3_SIZE)

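/*
 * The TTC table uses three flow groups: one matching ethertype plus
 * IP protocol (L4 rules), one matching ethertype only (L3 rules), and
 * a final wildcard group for MLX5E_TT_ANY.
 */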
static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5e_flow_table *ft = &ttc->ft;
        int ix = 0;
        u32 *in;
        int err;
        u8 *mc;

        ft->g = kcalloc(MLX5E_TTC_NUM_GROUPS,
                        sizeof(*ft->g), GFP_KERNEL);
        if (!ft->g)
                return -ENOMEM;
        in = mlx5_vzalloc(inlen);
        if (!in) {
                kfree(ft->g);
                return -ENOMEM;
        }

        /* L4 Group */
        mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_TTC_GROUP1_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err;
        ft->num_groups++;

        /* L3 Group */
        MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_TTC_GROUP2_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err;
        ft->num_groups++;

        /* Any Group */
        memset(in, 0, inlen);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_TTC_GROUP3_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err;
        ft->num_groups++;

        kvfree(in);
        return 0;

err:
        err = PTR_ERR(ft->g[ft->num_groups]);
        ft->g[ft->num_groups] = NULL;
        kvfree(in);

        return err;
}

static void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
{
        struct mlx5e_ttc_table *ttc = &priv->fs.ttc;

        mlx5e_cleanup_ttc_rules(ttc);
        mlx5e_destroy_flow_table(&ttc->ft);
}

static int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
{
        struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
        struct mlx5e_flow_table *ft = &ttc->ft;
        int err;

        ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
                                       MLX5E_TTC_TABLE_SIZE, MLX5E_TTC_FT_LEVEL);
        if (IS_ERR(ft->t)) {
                err = PTR_ERR(ft->t);
                ft->t = NULL;
                return err;
        }

        err = mlx5e_create_ttc_table_groups(ttc);
        if (err)
                goto err;

        err = mlx5e_generate_ttc_table_rules(priv);
        if (err)
                goto err;

        return 0;
err:
        mlx5e_destroy_flow_table(ft);
        return err;
}

static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
                                   struct mlx5e_l2_rule *ai)
{
        if (!IS_ERR_OR_NULL(ai->rule)) {
                mlx5_del_flow_rule(ai->rule);
                ai->rule = NULL;
        }
}

static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
                                  struct mlx5e_l2_rule *ai, int type)
{
        struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
        struct mlx5_flow_destination dest;
        u8 match_criteria_enable = 0;
        u32 *match_criteria;
        u32 *match_value;
        int err = 0;
        u8 *mc_dmac;
        u8 *mv_dmac;

        match_value    = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
        match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
        if (!match_value || !match_criteria) {
                netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
                err = -ENOMEM;
                goto add_l2_rule_out;
        }

        mc_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
                               outer_headers.dmac_47_16);
        mv_dmac = MLX5_ADDR_OF(fte_match_param, match_value,
                               outer_headers.dmac_47_16);

        dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest.ft = priv->fs.ttc.ft.t;

        switch (type) {
        case MLX5E_FULLMATCH:
                match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
                eth_broadcast_addr(mc_dmac);
                ether_addr_copy(mv_dmac, ai->addr);
                break;

        case MLX5E_ALLMULTI:
                match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
                mc_dmac[0] = 0x01;
                mv_dmac[0] = 0x01;
                break;

        case MLX5E_PROMISC:
                break;
        }

        ai->rule = mlx5_add_flow_rule(ft, match_criteria_enable, match_criteria,
                                      match_value,
                                      MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                                      MLX5_FS_DEFAULT_FLOW_TAG, &dest);
        if (IS_ERR(ai->rule)) {
                netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
                           __func__, ai->addr);
                err = PTR_ERR(ai->rule);
                ai->rule = NULL;
        }

add_l2_rule_out:
        kvfree(match_criteria);
        kvfree(match_value);

        return err;
}

#define MLX5E_NUM_L2_GROUPS	3
#define MLX5E_L2_GROUP1_SIZE	BIT(0)
#define MLX5E_L2_GROUP2_SIZE	BIT(15)
#define MLX5E_L2_GROUP3_SIZE	BIT(0)
#define MLX5E_L2_TABLE_SIZE	(MLX5E_L2_GROUP1_SIZE +\
				 MLX5E_L2_GROUP2_SIZE +\
				 MLX5E_L2_GROUP3_SIZE)

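/*
 * The L2 table uses three flow groups: a single-entry group with no
 * match criteria (promiscuous), a large group matching the full DMAC,
 * and a single-entry group matching only the multicast bit (allmulti).
 */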
static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5e_flow_table *ft = &l2_table->ft;
        int ix = 0;
        u8 *mc_dmac;
        u32 *in;
        int err;
        u8 *mc;

        ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL);
        if (!ft->g)
                return -ENOMEM;
        in = mlx5_vzalloc(inlen);
        if (!in) {
                kfree(ft->g);
                return -ENOMEM;
        }

        mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
        mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
                               outer_headers.dmac_47_16);
        /* Flow Group for promiscuous */
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_L2_GROUP1_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        /* Flow Group for full match */
        eth_broadcast_addr(mc_dmac);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_L2_GROUP2_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        /* Flow Group for allmulti */
        eth_zero_addr(mc_dmac);
        mc_dmac[0] = 0x01;
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_L2_GROUP3_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        kvfree(in);
        return 0;

err_destroy_groups:
        err = PTR_ERR(ft->g[ft->num_groups]);
        ft->g[ft->num_groups] = NULL;
        mlx5e_destroy_groups(ft);
        kvfree(in);

        return err;
}

static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
{
        mlx5e_destroy_flow_table(&priv->fs.l2.ft);
}

static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
{
        struct mlx5e_l2_table *l2_table = &priv->fs.l2;
        struct mlx5e_flow_table *ft = &l2_table->ft;
        int err;

        ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
                                       MLX5E_L2_TABLE_SIZE, MLX5E_L2_FT_LEVEL);
        if (IS_ERR(ft->t)) {
                err = PTR_ERR(ft->t);
                ft->t = NULL;
                return err;
        }

        err = mlx5e_create_l2_table_groups(l2_table);
        if (err)
                goto err_destroy_flow_table;

        return 0;

err_destroy_flow_table:
        mlx5_destroy_flow_table(ft->t);
        ft->t = NULL;

        return err;
}

#define MLX5E_NUM_VLAN_GROUPS	2
#define MLX5E_VLAN_GROUP0_SIZE	BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE	BIT(1)
#define MLX5E_VLAN_TABLE_SIZE	(MLX5E_VLAN_GROUP0_SIZE +\
				 MLX5E_VLAN_GROUP1_SIZE)

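/*
 * The VLAN table uses two flow groups: group 0 matches vlan_tag plus
 * first_vid (one entry per possible VID), group 1 matches vlan_tag only
 * (the untagged and any-VID rules).
 */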
static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
                                            int inlen)
{
        u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
        int ix = 0;
        int err;

        memset(in, 0, inlen);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_VLAN_GROUP0_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        memset(in, 0, inlen);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_VLAN_GROUP1_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        return 0;

err_destroy_groups:
        err = PTR_ERR(ft->g[ft->num_groups]);
        ft->g[ft->num_groups] = NULL;
        mlx5e_destroy_groups(ft);

        return err;
}

static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        u32 *in;
        int err;

        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        err = __mlx5e_create_vlan_table_groups(ft, in, inlen);

        kvfree(in);
        return err;
}

static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
{
        struct mlx5e_flow_table *ft = &priv->fs.vlan.ft;
        int err;

        ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
                                       MLX5E_VLAN_TABLE_SIZE, MLX5E_VLAN_FT_LEVEL);

        if (IS_ERR(ft->t)) {
                err = PTR_ERR(ft->t);
                ft->t = NULL;
                return err;
        }
        ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
        if (!ft->g) {
                err = -ENOMEM;
                goto err_destroy_vlan_table;
        }

        err = mlx5e_create_vlan_table_groups(ft);
        if (err)
                goto err_free_g;

        err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
        if (err)
                goto err_destroy_vlan_flow_groups;

        return 0;

err_destroy_vlan_flow_groups:
        mlx5e_destroy_groups(ft);
err_free_g:
        kfree(ft->g);
err_destroy_vlan_table:
        mlx5_destroy_flow_table(ft->t);
        ft->t = NULL;

        return err;
}

static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
{
        mlx5e_destroy_flow_table(&priv->fs.vlan.ft);
}

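/*
 * Tables are created in reverse order of the packet path (TTC, then L2,
 * then VLAN) because each table's rules forward to the next table and
 * therefore need it to exist first.
 */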
int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
{
        int err;

        priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
                                              MLX5_FLOW_NAMESPACE_KERNEL);
        if (!priv->fs.ns)
                return -EINVAL;

        err = mlx5e_create_ttc_table(priv);
        if (err) {
                netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
                           err);
                return err;
        }

        err = mlx5e_create_l2_table(priv);
        if (err) {
                netdev_err(priv->netdev, "Failed to create l2 table, err=%d\n",
                           err);
                goto err_destroy_ttc_table;
        }

        err = mlx5e_create_vlan_table(priv);
        if (err) {
                netdev_err(priv->netdev, "Failed to create vlan table, err=%d\n",
                           err);
                goto err_destroy_l2_table;
        }

        return 0;

err_destroy_l2_table:
        mlx5e_destroy_l2_table(priv);
err_destroy_ttc_table:
        mlx5e_destroy_ttc_table(priv);

        return err;
}

void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
{
        mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
        mlx5e_destroy_vlan_table(priv);
        mlx5e_destroy_l2_table(priv);
        mlx5e_destroy_ttc_table(priv);
}