/*
 * Copyright (c) 2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #ifndef __MLX5_ESWITCH_H__
34 #define __MLX5_ESWITCH_H__
36 #include <linux/if_ether.h>
37 #include <linux/if_link.h>
38 #include <net/devlink.h>
39 #include <net/ip_tunnels.h>
40 #include <linux/mlx5/device.h>
/* Maximum unicast/multicast L2 addresses a single vport may own,
 * derived from the device's general capabilities (log2 values in HW).
 */
#define MLX5_MAX_UC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_uc_list))

#define MLX5_MAX_MC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_mc_list))
/* The L2 address tables are open hashes of 256 buckets (one bucket per
 * possible value of the last MAC byte).
 */
#define MLX5_L2_ADDR_HASH_SIZE	(BIT(BITS_PER_BYTE))
/* Hash key is the last octet of the MAC address. The argument is
 * parenthesized so expression arguments (e.g. "p + i") expand safely.
 */
#define MLX5_L2_ADDR_HASH(addr)	((addr)[5])

/* Reserved vport number representing the uplink port in the FDB. */
#define FDB_UPLINK_VPORT	0xffff
/* Smallest non-zero TSAR bandwidth share value. */
#define MLX5_MIN_BW_SHARE 1

/* Convert a rate into a bandwidth share, clamped to
 * [MLX5_MIN_BW_SHARE, limit].
 */
#define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \
	min_t(u32, max_t(u32, (rate) / (divider), MLX5_MIN_BW_SHARE), limit)
58 /* L2 -mac address based- hash helpers */
/* NOTE(review): fragment — the enclosing struct's name, its opening
 * brace, its other member(s) (presumably the MAC address itself) and
 * its closing "};" are missing from this extraction, and a stray
 * line-number prefix remains. TODO: restore from the complete source.
 */
60 struct hlist_node hlist
;
/* Iterate (deletion-safe) over every node in an L2 address hash table:
 * walks each of the MLX5_L2_ADDR_HASH_SIZE buckets in turn.
 */
#define for_each_l2hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5_L2_ADDR_HASH_SIZE; i++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
/* NOTE(review): the three l2addr hash helper macros below (find / add /
 * del) are truncated in this extraction — the local "type *ptr"
 * declarations, allocation checks, return expressions and the closing
 * "})" of each GCC statement-expression are missing, and stray
 * line-number prefixes remain inside the macro bodies. Do not attempt
 * to use as-is; restore from the complete source.
 */
68 #define l2addr_hash_find(hash, mac, type) ({ \
69 int ix = MLX5_L2_ADDR_HASH(mac); \
73 hlist_for_each_entry(ptr, &hash[ix], node.hlist) \
74 if (ether_addr_equal(ptr->node.addr, mac)) {\
83 #define l2addr_hash_add(hash, mac, type, gfp) ({ \
84 int ix = MLX5_L2_ADDR_HASH(mac); \
87 ptr = kzalloc(sizeof(type), gfp); \
89 ether_addr_copy(ptr->node.addr, mac); \
90 hlist_add_head(&ptr->node.hlist, &hash[ix]);\
95 #define l2addr_hash_del(ptr) ({ \
96 hlist_del(&ptr->node.hlist); \
/* Per-vport ingress ACL objects: the ACL flow table, its match groups
 * (untagged+spoofchk, spoofchk-only, untagged-only, catch-all drop) and
 * the currently installed allow/drop rules.
 */
struct vport_ingress {
	struct mlx5_flow_table  *acl;
	struct mlx5_flow_group  *allow_untagged_spoofchk_grp;
	struct mlx5_flow_group  *allow_spoofchk_only_grp;
	struct mlx5_flow_group  *allow_untagged_only_grp;
	struct mlx5_flow_group  *drop_grp;
	struct mlx5_flow_handle *allow_rule;
	struct mlx5_flow_handle *drop_rule;
};
/* Per-vport egress ACL objects: the ACL flow table, its match groups
 * (allowed VLANs, catch-all drop) and the currently installed rules.
 */
struct vport_egress {
	struct mlx5_flow_table  *acl;
	struct mlx5_flow_group  *allowed_vlans_grp;
	struct mlx5_flow_group  *drop_grp;
	struct mlx5_flow_handle *allowed_vlan;
	struct mlx5_flow_handle *drop_rule;
};
/* NOTE(review): heavily truncated region. "struct mlx5_vport_info" opens
 * here but its own members are missing; the members that follow (dev,
 * uc_list/mc_list hashes, promisc/allmulti rules, change handler,
 * ingress/egress ACLs, info) appear to belong to a different struct
 * (presumably the per-vport struct) whose opening line is missing.
 * Stray line-number prefixes remain. Restore from the complete source.
 */
118 struct mlx5_vport_info
{
131 struct mlx5_core_dev
*dev
;
133 struct hlist_head uc_list
[MLX5_L2_ADDR_HASH_SIZE
];
134 struct hlist_head mc_list
[MLX5_L2_ADDR_HASH_SIZE
];
135 struct mlx5_flow_handle
*promisc_rule
;
136 struct mlx5_flow_handle
*allmulti_rule
;
137 struct work_struct vport_change_handler
;
139 struct vport_ingress ingress
;
140 struct vport_egress egress
;
142 struct mlx5_vport_info info
;
/* NOTE(review): "struct mlx5_l2_table" below is also truncated — at
 * least one member and the closing "};" are missing from this view.
 */
154 struct mlx5_l2_table
{
155 struct hlist_head l2_hash
[MLX5_L2_ADDR_HASH_SIZE
];
157 unsigned long *bitmap
;
/* NOTE(review): truncated region. "struct mlx5_eswitch_fdb" opens here
 * but intermediate lines are missing (likely a wrapper for the
 * legacy-mode groups below); the nested "struct offloads_fdb" lacks its
 * closing, and the trailing send_to_vport_rule/list members appear to
 * belong to yet another (unseen) struct. Stray line-number prefixes
 * remain. Restore from the complete source.
 */
160 struct mlx5_eswitch_fdb
{
164 struct mlx5_flow_group
*addr_grp
;
165 struct mlx5_flow_group
*allmulti_grp
;
166 struct mlx5_flow_group
*promisc_grp
;
169 struct offloads_fdb
{
170 struct mlx5_flow_table
*fdb
;
171 struct mlx5_flow_group
*send_to_vport_grp
;
172 struct mlx5_flow_group
*miss_grp
;
173 struct mlx5_flow_handle
*miss_rule
;
174 int vlan_push_pop_refcount
;
186 struct mlx5_flow_handle
*send_to_vport_rule
;
187 struct list_head list
;
/* NOTE(review): truncated. The representor struct shows load/unload
 * callbacks, the backing netdev, the vport RX rule and the SQ list, but
 * several member lines and the closing "};" are missing from this
 * extraction, and stray line-number prefixes remain.
 */
190 struct mlx5_eswitch_rep
{
191 int (*load
)(struct mlx5_eswitch
*esw
,
192 struct mlx5_eswitch_rep
*rep
);
193 void (*unload
)(struct mlx5_eswitch
*esw
,
194 struct mlx5_eswitch_rep
*rep
);
197 struct net_device
*netdev
;
199 struct mlx5_flow_handle
*vport_rx_rule
;
200 struct list_head vport_sqs_list
;
/* NOTE(review): truncated. Offloads-mode state: the offloads flow
 * table, its vport RX group, the per-vport representor array and an
 * 8-bit (256-bucket) encap hashtable. The closing "};" — and possibly
 * further members — are missing from this extraction, and stray
 * line-number prefixes remain.
 */
206 struct mlx5_esw_offload
{
207 struct mlx5_flow_table
*ft_offloads
;
208 struct mlx5_flow_group
*vport_rx_group
;
209 struct mlx5_eswitch_rep
*vport_reps
;
210 DECLARE_HASHTABLE(encap_tbl
, 8);
/* NOTE(review): truncated. Top-level e-switch object: core device, L2
 * table, FDB, multicast table, work queue, vport array, state lock,
 * multicast-promisc entry and offloads state. Several member lines and
 * the closing "};" are missing from this extraction, and stray
 * line-number prefixes remain.
 */
214 struct mlx5_eswitch
{
215 struct mlx5_core_dev
*dev
;
216 struct mlx5_l2_table l2_table
;
217 struct mlx5_eswitch_fdb fdb_table
;
218 struct hlist_head mc_table
[MLX5_L2_ADDR_HASH_SIZE
];
219 struct workqueue_struct
*work_queue
;
220 struct mlx5_vport
*vports
;
223 /* Synchronize between vport change events
224 * and async SRIOV admin state changes
 */
226 struct mutex state_lock
;
227 struct esw_mc_addr
*mc_promisc
;
234 struct mlx5_esw_offload offloads
;
238 void esw_offloads_cleanup(struct mlx5_eswitch
*esw
, int nvports
);
239 int esw_offloads_init(struct mlx5_eswitch
*esw
, int nvports
);
242 int mlx5_eswitch_init(struct mlx5_core_dev
*dev
);
243 void mlx5_eswitch_cleanup(struct mlx5_eswitch
*esw
);
244 void mlx5_eswitch_attach(struct mlx5_eswitch
*esw
);
245 void mlx5_eswitch_detach(struct mlx5_eswitch
*esw
);
246 void mlx5_eswitch_vport_event(struct mlx5_eswitch
*esw
, struct mlx5_eqe
*eqe
);
247 int mlx5_eswitch_enable_sriov(struct mlx5_eswitch
*esw
, int nvfs
, int mode
);
248 void mlx5_eswitch_disable_sriov(struct mlx5_eswitch
*esw
);
249 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch
*esw
,
250 int vport
, u8 mac
[ETH_ALEN
]);
251 int mlx5_eswitch_set_vport_state(struct mlx5_eswitch
*esw
,
252 int vport
, int link_state
);
253 int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch
*esw
,
254 int vport
, u16 vlan
, u8 qos
);
255 int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch
*esw
,
256 int vport
, bool spoofchk
);
257 int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch
*esw
,
258 int vport_num
, bool setting
);
259 int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch
*esw
, int vport
,
260 u32 max_rate
, u32 min_rate
);
261 int mlx5_eswitch_get_vport_config(struct mlx5_eswitch
*esw
,
262 int vport
, struct ifla_vf_info
*ivi
);
/* NOTE(review): truncated prototype — a middle parameter line is
 * missing from this extraction (presumably the vport index; TODO
 * confirm against the complete source). Stray line-number prefixes
 * remain.
 */
263 int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch
*esw
,
265 struct ifla_vf_stats
*vf_stats
);
267 struct mlx5_flow_spec
;
268 struct mlx5_esw_flow_attr
;
270 struct mlx5_flow_handle
*
271 mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch
*esw
,
272 struct mlx5_flow_spec
*spec
,
273 struct mlx5_esw_flow_attr
*attr
);
274 struct mlx5_flow_handle
*
275 mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch
*esw
, int vport
, u32 tirn
);
/* NOTE(review): enumerator fragment — the "enum ... {" opening and the
 * closing "};" are missing from this extraction. The two flags form a
 * bitmask (BIT(0)/BIT(1)); presumably the set_flags argument of
 * __mlx5_eswitch_set_vport_vlan — TODO confirm.
 */
278 SET_VLAN_STRIP
= BIT(0),
279 SET_VLAN_INSERT
= BIT(1)
/* Flow-context action bits for VLAN pop/push (distinct single bits so
 * they can be OR-ed into an action bitmask).
 */
#define MLX5_FLOW_CONTEXT_ACTION_VLAN_POP  0x40
#define MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH 0x80
/* NOTE(review): truncated region. "struct mlx5_encap_entry" (hashtable
 * node, flow list, tunnel info, destination MAC, egress netdev) is
 * missing several member lines and its closing "};";
 * "struct mlx5_esw_flow_attr" (in/out representors, encap entry) is
 * likewise missing members and its closing. Stray line-number prefixes
 * remain. Restore from the complete source.
 */
285 struct mlx5_encap_entry
{
286 struct hlist_node encap_hlist
;
287 struct list_head flows
;
290 struct ip_tunnel_info tun_info
;
291 unsigned char h_dest
[ETH_ALEN
]; /* destination eth addr */
293 struct net_device
*out_dev
;
297 struct mlx5_esw_flow_attr
{
298 struct mlx5_eswitch_rep
*in_rep
;
299 struct mlx5_eswitch_rep
*out_rep
;
304 struct mlx5_encap_entry
*encap
;
307 int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch
*esw
,
308 struct mlx5_eswitch_rep
*rep
,
309 u16
*sqns_array
, int sqns_num
);
310 void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch
*esw
,
311 struct mlx5_eswitch_rep
*rep
);
313 int mlx5_devlink_eswitch_mode_set(struct devlink
*devlink
, u16 mode
);
314 int mlx5_devlink_eswitch_mode_get(struct devlink
*devlink
, u16
*mode
);
315 int mlx5_devlink_eswitch_inline_mode_set(struct devlink
*devlink
, u8 mode
);
316 int mlx5_devlink_eswitch_inline_mode_get(struct devlink
*devlink
, u8
*mode
);
317 int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch
*esw
, int nvfs
, u8
*mode
);
/* NOTE(review): truncated prototypes — register_vport_rep is missing a
 * middle parameter line, and unregister_vport_rep is missing its final
 * parameter line and closing ");" (presumably a vport index in both —
 * TODO confirm). Stray line-number prefixes remain.
 */
318 void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch
*esw
,
320 struct mlx5_eswitch_rep
*rep
);
321 void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch
*esw
,
/* Returns the netdev backing the uplink representor. */
323 struct net_device
*mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch
*esw
);
325 int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch
*esw
,
326 struct mlx5_esw_flow_attr
*attr
);
327 int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch
*esw
,
328 struct mlx5_esw_flow_attr
*attr
);
329 int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch
*esw
,
330 int vport
, u16 vlan
, u8 qos
, u8 set_flags
);
332 #define MLX5_DEBUG_ESWITCH_MASK BIT(3)
334 #define esw_info(dev, format, ...) \
335 pr_info("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)
337 #define esw_warn(dev, format, ...) \
338 pr_warn("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)
340 #define esw_debug(dev, format, ...) \
341 mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)
342 #endif /* __MLX5_ESWITCH_H__ */