/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #include <linux/mlx5/fs.h>
36 struct mlx5e_ethtool_rule
{
37 struct list_head list
;
38 struct ethtool_rx_flow_spec flow_spec
;
39 struct mlx5_flow_handle
*rule
;
40 struct mlx5e_ethtool_table
*eth_ft
;
43 static void put_flow_table(struct mlx5e_ethtool_table
*eth_ft
)
45 if (!--eth_ft
->num_rules
) {
46 mlx5_destroy_flow_table(eth_ft
->ft
);
51 #define MLX5E_ETHTOOL_L3_L4_PRIO 0
52 #define MLX5E_ETHTOOL_L2_PRIO (MLX5E_ETHTOOL_L3_L4_PRIO + ETHTOOL_NUM_L3_L4_FTS)
53 #define MLX5E_ETHTOOL_NUM_ENTRIES 64000
54 #define MLX5E_ETHTOOL_NUM_GROUPS 10
55 static struct mlx5e_ethtool_table
*get_flow_table(struct mlx5e_priv
*priv
,
56 struct ethtool_rx_flow_spec
*fs
,
59 struct mlx5e_ethtool_table
*eth_ft
;
60 struct mlx5_flow_namespace
*ns
;
61 struct mlx5_flow_table
*ft
;
66 switch (fs
->flow_type
& ~(FLOW_EXT
| FLOW_MAC_EXT
)) {
69 max_tuples
= ETHTOOL_NUM_L3_L4_FTS
;
70 prio
= MLX5E_ETHTOOL_L3_L4_PRIO
+ (max_tuples
- num_tuples
);
71 eth_ft
= &priv
->fs
.ethtool
.l3_l4_ft
[prio
];
74 max_tuples
= ETHTOOL_NUM_L3_L4_FTS
;
75 prio
= MLX5E_ETHTOOL_L3_L4_PRIO
+ (max_tuples
- num_tuples
);
76 eth_ft
= &priv
->fs
.ethtool
.l3_l4_ft
[prio
];
79 max_tuples
= ETHTOOL_NUM_L2_FTS
;
80 prio
= max_tuples
- num_tuples
;
81 eth_ft
= &priv
->fs
.ethtool
.l2_ft
[prio
];
82 prio
+= MLX5E_ETHTOOL_L2_PRIO
;
85 return ERR_PTR(-EINVAL
);
92 ns
= mlx5_get_flow_namespace(priv
->mdev
,
93 MLX5_FLOW_NAMESPACE_ETHTOOL
);
95 return ERR_PTR(-EOPNOTSUPP
);
97 table_size
= min_t(u32
, BIT(MLX5_CAP_FLOWTABLE(priv
->mdev
,
98 flow_table_properties_nic_receive
.log_max_ft_size
)),
99 MLX5E_ETHTOOL_NUM_ENTRIES
);
100 ft
= mlx5_create_auto_grouped_flow_table(ns
, prio
,
102 MLX5E_ETHTOOL_NUM_GROUPS
, 0, 0);
110 static void mask_spec(u8
*mask
, u8
*val
, size_t size
)
114 for (i
= 0; i
< size
; i
++, mask
++, val
++)
115 *((u8
*)val
) = *((u8
*)mask
) & *((u8
*)val
);
118 static void set_ips(void *outer_headers_v
, void *outer_headers_c
, __be32 ip4src_m
,
119 __be32 ip4src_v
, __be32 ip4dst_m
, __be32 ip4dst_v
)
122 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4
, outer_headers_v
,
123 src_ipv4_src_ipv6
.ipv4_layout
.ipv4
),
124 &ip4src_v
, sizeof(ip4src_v
));
125 memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4
, outer_headers_c
,
126 src_ipv4_src_ipv6
.ipv4_layout
.ipv4
),
127 0xff, sizeof(ip4src_m
));
130 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4
, outer_headers_v
,
131 dst_ipv4_dst_ipv6
.ipv4_layout
.ipv4
),
132 &ip4dst_v
, sizeof(ip4dst_v
));
133 memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4
, outer_headers_c
,
134 dst_ipv4_dst_ipv6
.ipv4_layout
.ipv4
),
135 0xff, sizeof(ip4dst_m
));
137 MLX5_SET(fte_match_set_lyr_2_4
, outer_headers_v
,
138 ethertype
, ETH_P_IP
);
139 MLX5_SET(fte_match_set_lyr_2_4
, outer_headers_c
,
143 static int set_flow_attrs(u32
*match_c
, u32
*match_v
,
144 struct ethtool_rx_flow_spec
*fs
)
146 void *outer_headers_c
= MLX5_ADDR_OF(fte_match_param
, match_c
,
148 void *outer_headers_v
= MLX5_ADDR_OF(fte_match_param
, match_v
,
150 u32 flow_type
= fs
->flow_type
& ~(FLOW_EXT
| FLOW_MAC_EXT
);
151 struct ethtool_tcpip4_spec
*l4_mask
;
152 struct ethtool_tcpip4_spec
*l4_val
;
153 struct ethtool_usrip4_spec
*l3_mask
;
154 struct ethtool_usrip4_spec
*l3_val
;
155 struct ethhdr
*eth_val
;
156 struct ethhdr
*eth_mask
;
160 l4_mask
= &fs
->m_u
.tcp_ip4_spec
;
161 l4_val
= &fs
->h_u
.tcp_ip4_spec
;
162 set_ips(outer_headers_v
, outer_headers_c
, l4_mask
->ip4src
,
163 l4_val
->ip4src
, l4_mask
->ip4dst
, l4_val
->ip4dst
);
166 MLX5_SET(fte_match_set_lyr_2_4
, outer_headers_c
, tcp_sport
,
168 MLX5_SET(fte_match_set_lyr_2_4
, outer_headers_v
, tcp_sport
,
169 ntohs(l4_val
->psrc
));
172 MLX5_SET(fte_match_set_lyr_2_4
, outer_headers_c
, tcp_dport
,
174 MLX5_SET(fte_match_set_lyr_2_4
, outer_headers_v
, tcp_dport
,
175 ntohs(l4_val
->pdst
));
177 MLX5_SET(fte_match_set_lyr_2_4
, outer_headers_c
, ip_protocol
,
179 MLX5_SET(fte_match_set_lyr_2_4
, outer_headers_v
, ip_protocol
,
183 l4_mask
= &fs
->m_u
.tcp_ip4_spec
;
184 l4_val
= &fs
->h_u
.tcp_ip4_spec
;
185 set_ips(outer_headers_v
, outer_headers_c
, l4_mask
->ip4src
,
186 l4_val
->ip4src
, l4_mask
->ip4dst
, l4_val
->ip4dst
);
189 MLX5_SET(fte_match_set_lyr_2_4
, outer_headers_c
, udp_sport
,
191 MLX5_SET(fte_match_set_lyr_2_4
, outer_headers_v
, udp_sport
,
192 ntohs(l4_val
->psrc
));
195 MLX5_SET(fte_match_set_lyr_2_4
, outer_headers_c
, udp_dport
,
197 MLX5_SET(fte_match_set_lyr_2_4
, outer_headers_v
, udp_dport
,
198 ntohs(l4_val
->pdst
));
200 MLX5_SET(fte_match_set_lyr_2_4
, outer_headers_c
, ip_protocol
,
202 MLX5_SET(fte_match_set_lyr_2_4
, outer_headers_v
, ip_protocol
,
206 l3_mask
= &fs
->m_u
.usr_ip4_spec
;
207 l3_val
= &fs
->h_u
.usr_ip4_spec
;
208 set_ips(outer_headers_v
, outer_headers_c
, l3_mask
->ip4src
,
209 l3_val
->ip4src
, l3_mask
->ip4dst
, l3_val
->ip4dst
);
212 eth_mask
= &fs
->m_u
.ether_spec
;
213 eth_val
= &fs
->h_u
.ether_spec
;
215 mask_spec((u8
*)eth_mask
, (u8
*)eth_val
, sizeof(*eth_mask
));
216 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4
,
217 outer_headers_c
, smac_47_16
),
219 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4
,
220 outer_headers_v
, smac_47_16
),
222 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4
,
223 outer_headers_c
, dmac_47_16
),
225 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4
,
226 outer_headers_v
, dmac_47_16
),
228 MLX5_SET(fte_match_set_lyr_2_4
, outer_headers_c
, ethertype
,
229 ntohs(eth_mask
->h_proto
));
230 MLX5_SET(fte_match_set_lyr_2_4
, outer_headers_v
, ethertype
,
231 ntohs(eth_val
->h_proto
));
237 if ((fs
->flow_type
& FLOW_EXT
) &&
238 (fs
->m_ext
.vlan_tci
& cpu_to_be16(VLAN_VID_MASK
))) {
239 MLX5_SET(fte_match_set_lyr_2_4
, outer_headers_c
,
241 MLX5_SET(fte_match_set_lyr_2_4
, outer_headers_v
,
243 MLX5_SET(fte_match_set_lyr_2_4
, outer_headers_c
,
245 MLX5_SET(fte_match_set_lyr_2_4
, outer_headers_v
,
246 first_vid
, ntohs(fs
->h_ext
.vlan_tci
));
248 if (fs
->flow_type
& FLOW_MAC_EXT
&&
249 !is_zero_ether_addr(fs
->m_ext
.h_dest
)) {
250 mask_spec(fs
->m_ext
.h_dest
, fs
->h_ext
.h_dest
, ETH_ALEN
);
251 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4
,
252 outer_headers_c
, dmac_47_16
),
254 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4
,
255 outer_headers_v
, dmac_47_16
),
262 static void add_rule_to_list(struct mlx5e_priv
*priv
,
263 struct mlx5e_ethtool_rule
*rule
)
265 struct mlx5e_ethtool_rule
*iter
;
266 struct list_head
*head
= &priv
->fs
.ethtool
.rules
;
268 list_for_each_entry(iter
, &priv
->fs
.ethtool
.rules
, list
) {
269 if (iter
->flow_spec
.location
> rule
->flow_spec
.location
)
273 priv
->fs
.ethtool
.tot_num_rules
++;
274 list_add(&rule
->list
, head
);
277 static bool outer_header_zero(u32
*match_criteria
)
279 int size
= MLX5_ST_SZ_BYTES(fte_match_param
);
280 char *outer_headers_c
= MLX5_ADDR_OF(fte_match_param
, match_criteria
,
283 return outer_headers_c
[0] == 0 && !memcmp(outer_headers_c
,
288 static struct mlx5_flow_handle
*
289 add_ethtool_flow_rule(struct mlx5e_priv
*priv
,
290 struct mlx5_flow_table
*ft
,
291 struct ethtool_rx_flow_spec
*fs
)
293 struct mlx5_flow_destination
*dst
= NULL
;
294 struct mlx5_flow_act flow_act
= {0};
295 struct mlx5_flow_spec
*spec
;
296 struct mlx5_flow_handle
*rule
;
299 spec
= kvzalloc(sizeof(*spec
), GFP_KERNEL
);
301 return ERR_PTR(-ENOMEM
);
302 err
= set_flow_attrs(spec
->match_criteria
, spec
->match_value
,
307 if (fs
->ring_cookie
== RX_CLS_FLOW_DISC
) {
308 flow_act
.action
= MLX5_FLOW_CONTEXT_ACTION_DROP
;
310 dst
= kzalloc(sizeof(*dst
), GFP_KERNEL
);
316 dst
->type
= MLX5_FLOW_DESTINATION_TYPE_TIR
;
317 dst
->tir_num
= priv
->direct_tir
[fs
->ring_cookie
].tirn
;
318 flow_act
.action
= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST
;
321 spec
->match_criteria_enable
= (!outer_header_zero(spec
->match_criteria
));
322 flow_act
.flow_tag
= MLX5_FS_DEFAULT_FLOW_TAG
;
323 rule
= mlx5_add_flow_rules(ft
, spec
, &flow_act
, dst
, 1);
326 netdev_err(priv
->netdev
, "%s: failed to add ethtool steering rule: %d\n",
333 return err
? ERR_PTR(err
) : rule
;
336 static void del_ethtool_rule(struct mlx5e_priv
*priv
,
337 struct mlx5e_ethtool_rule
*eth_rule
)
340 mlx5_del_flow_rules(eth_rule
->rule
);
341 list_del(ð_rule
->list
);
342 priv
->fs
.ethtool
.tot_num_rules
--;
343 put_flow_table(eth_rule
->eth_ft
);
347 static struct mlx5e_ethtool_rule
*find_ethtool_rule(struct mlx5e_priv
*priv
,
350 struct mlx5e_ethtool_rule
*iter
;
352 list_for_each_entry(iter
, &priv
->fs
.ethtool
.rules
, list
) {
353 if (iter
->flow_spec
.location
== location
)
359 static struct mlx5e_ethtool_rule
*get_ethtool_rule(struct mlx5e_priv
*priv
,
362 struct mlx5e_ethtool_rule
*eth_rule
;
364 eth_rule
= find_ethtool_rule(priv
, location
);
366 del_ethtool_rule(priv
, eth_rule
);
368 eth_rule
= kzalloc(sizeof(*eth_rule
), GFP_KERNEL
);
370 return ERR_PTR(-ENOMEM
);
372 add_rule_to_list(priv
, eth_rule
);
376 #define MAX_NUM_OF_ETHTOOL_RULES BIT(10)
378 #define all_ones(field) (field == (__force typeof(field))-1)
379 #define all_zeros_or_all_ones(field) \
380 ((field) == 0 || (field) == (__force typeof(field))-1)
382 static int validate_flow(struct mlx5e_priv
*priv
,
383 struct ethtool_rx_flow_spec
*fs
)
385 struct ethtool_tcpip4_spec
*l4_mask
;
386 struct ethtool_usrip4_spec
*l3_mask
;
387 struct ethhdr
*eth_mask
;
390 if (fs
->location
>= MAX_NUM_OF_ETHTOOL_RULES
)
393 if (fs
->ring_cookie
>= priv
->channels
.params
.num_channels
&&
394 fs
->ring_cookie
!= RX_CLS_FLOW_DISC
)
397 switch (fs
->flow_type
& ~(FLOW_EXT
| FLOW_MAC_EXT
)) {
399 eth_mask
= &fs
->m_u
.ether_spec
;
400 if (!is_zero_ether_addr(eth_mask
->h_dest
))
402 if (!is_zero_ether_addr(eth_mask
->h_source
))
404 if (eth_mask
->h_proto
)
409 if (fs
->m_u
.tcp_ip4_spec
.tos
)
411 l4_mask
= &fs
->m_u
.tcp_ip4_spec
;
412 if (l4_mask
->ip4src
) {
413 if (!all_ones(l4_mask
->ip4src
))
417 if (l4_mask
->ip4dst
) {
418 if (!all_ones(l4_mask
->ip4dst
))
423 if (!all_ones(l4_mask
->psrc
))
428 if (!all_ones(l4_mask
->pdst
))
432 /* Flow is TCP/UDP */
436 l3_mask
= &fs
->m_u
.usr_ip4_spec
;
437 if (l3_mask
->l4_4_bytes
|| l3_mask
->tos
|| l3_mask
->proto
||
438 fs
->h_u
.usr_ip4_spec
.ip_ver
!= ETH_RX_NFC_IP4
)
440 if (l3_mask
->ip4src
) {
441 if (!all_ones(l3_mask
->ip4src
))
445 if (l3_mask
->ip4dst
) {
446 if (!all_ones(l3_mask
->ip4dst
))
456 if ((fs
->flow_type
& FLOW_EXT
)) {
457 if (fs
->m_ext
.vlan_etype
||
458 (fs
->m_ext
.vlan_tci
!= cpu_to_be16(VLAN_VID_MASK
)))
461 if (fs
->m_ext
.vlan_tci
) {
462 if (be16_to_cpu(fs
->h_ext
.vlan_tci
) >= VLAN_N_VID
)
468 if (fs
->flow_type
& FLOW_MAC_EXT
&&
469 !is_zero_ether_addr(fs
->m_ext
.h_dest
))
475 int mlx5e_ethtool_flow_replace(struct mlx5e_priv
*priv
,
476 struct ethtool_rx_flow_spec
*fs
)
478 struct mlx5e_ethtool_table
*eth_ft
;
479 struct mlx5e_ethtool_rule
*eth_rule
;
480 struct mlx5_flow_handle
*rule
;
484 num_tuples
= validate_flow(priv
, fs
);
485 if (num_tuples
<= 0) {
486 netdev_warn(priv
->netdev
, "%s: flow is not valid\n", __func__
);
490 eth_ft
= get_flow_table(priv
, fs
, num_tuples
);
492 return PTR_ERR(eth_ft
);
494 eth_rule
= get_ethtool_rule(priv
, fs
->location
);
495 if (IS_ERR(eth_rule
)) {
496 put_flow_table(eth_ft
);
497 return PTR_ERR(eth_rule
);
500 eth_rule
->flow_spec
= *fs
;
501 eth_rule
->eth_ft
= eth_ft
;
504 goto del_ethtool_rule
;
506 rule
= add_ethtool_flow_rule(priv
, eth_ft
->ft
, fs
);
509 goto del_ethtool_rule
;
512 eth_rule
->rule
= rule
;
517 del_ethtool_rule(priv
, eth_rule
);
522 int mlx5e_ethtool_flow_remove(struct mlx5e_priv
*priv
,
525 struct mlx5e_ethtool_rule
*eth_rule
;
528 if (location
>= MAX_NUM_OF_ETHTOOL_RULES
)
531 eth_rule
= find_ethtool_rule(priv
, location
);
537 del_ethtool_rule(priv
, eth_rule
);
542 int mlx5e_ethtool_get_flow(struct mlx5e_priv
*priv
, struct ethtool_rxnfc
*info
,
545 struct mlx5e_ethtool_rule
*eth_rule
;
547 if (location
< 0 || location
>= MAX_NUM_OF_ETHTOOL_RULES
)
550 list_for_each_entry(eth_rule
, &priv
->fs
.ethtool
.rules
, list
) {
551 if (eth_rule
->flow_spec
.location
== location
) {
552 info
->fs
= eth_rule
->flow_spec
;
560 int mlx5e_ethtool_get_all_flows(struct mlx5e_priv
*priv
, struct ethtool_rxnfc
*info
,
567 info
->data
= MAX_NUM_OF_ETHTOOL_RULES
;
568 while ((!err
|| err
== -ENOENT
) && idx
< info
->rule_cnt
) {
569 err
= mlx5e_ethtool_get_flow(priv
, info
, location
);
571 rule_locs
[idx
++] = location
;
577 void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv
*priv
)
579 struct mlx5e_ethtool_rule
*iter
;
580 struct mlx5e_ethtool_rule
*temp
;
582 list_for_each_entry_safe(iter
, temp
, &priv
->fs
.ethtool
.rules
, list
)
583 del_ethtool_rule(priv
, iter
);
586 void mlx5e_ethtool_init_steering(struct mlx5e_priv
*priv
)
588 INIT_LIST_HEAD(&priv
->fs
.ethtool
.rules
);