/*
 * Copyright (c) 2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #ifndef __MLX5_ESWITCH_H__
34 #define __MLX5_ESWITCH_H__
36 #include <linux/if_ether.h>
37 #include <linux/if_link.h>
38 #include <net/devlink.h>
39 #include <net/ip_tunnels.h>
40 #include <linux/mlx5/device.h>
/* Per-vport unicast/multicast address-list capacity, read from HCA caps. */
#define MLX5_MAX_UC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_uc_list))

#define MLX5_MAX_MC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_mc_list))

/* 256-bucket L2 hash, keyed on the least-significant byte of the MAC. */
#define MLX5_L2_ADDR_HASH_SIZE (BIT(BITS_PER_BYTE))
#define MLX5_L2_ADDR_HASH(addr) (addr[5])
/* Pseudo vport number used for FDB rules that steer to the uplink port. */
#define FDB_UPLINK_VPORT 0xffff

/* Smallest non-zero TSAR bandwidth share a vport may be granted. */
#define MLX5_MIN_BW_SHARE 1

/* Map a rate to a BW share value, clamped to [MLX5_MIN_BW_SHARE, limit]. */
#define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \
	min_t(u32, max_t(u32, (rate) / (divider), MLX5_MIN_BW_SHARE), limit)
58 /* L2 -mac address based- hash helpers */
/*
 * NOTE(review): extraction dropped the enclosing definition here. Upstream,
 * this `hlist` member belongs to `struct mlx5_l2_addr_node`, whose header
 * line, `addr[ETH_ALEN]` member and closing brace are all missing from this
 * copy. Restore the struct from the original file; do not guess at it.
 */
60 struct hlist_node hlist
;
/* Walk every node of an MLX5_L2_ADDR_HASH_SIZE-bucket L2 hash table.
 * Uses the _safe list walker, so the current entry may be deleted while
 * iterating. @hn/@tmp are the cursor and scratch node, @i the bucket index.
 */
#define for_each_l2hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5_L2_ADDR_HASH_SIZE; i++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
/*
 * NOTE(review): this macro is truncated by extraction. The lines declaring
 * the result pointer, the found/break handling and the closing `})` of the
 * statement expression are missing; as written it does not parse. Restore
 * the full l2addr_hash_find() from the original file.
 */
68 #define l2addr_hash_find(hash, mac, type) ({ \
69 int ix = MLX5_L2_ADDR_HASH(mac); \
73 hlist_for_each_entry(ptr, &hash[ix], node.hlist) \
74 if (ether_addr_equal(ptr->node.addr, mac)) {\
/*
 * NOTE(review): truncated by extraction. The declaration of `ptr`, the
 * NULL-check after kzalloc(), and the closing `ptr; })` of the statement
 * expression are missing. Restore from the original file; as written the
 * kzalloc() result is used unchecked and the macro does not parse.
 */
83 #define l2addr_hash_add(hash, mac, type, gfp) ({ \
84 int ix = MLX5_L2_ADDR_HASH(mac); \
87 ptr = kzalloc(sizeof(type), gfp); \
89 ether_addr_copy(ptr->node.addr, mac); \
90 hlist_add_head(&ptr->node.hlist, &hash[ix]);\
/*
 * NOTE(review): truncated by extraction — the `kfree(ptr); })` tail of the
 * statement expression is missing. Restore from the original file.
 */
95 #define l2addr_hash_del(ptr) ({ \
96 hlist_del(&ptr->node.hlist); \
/* Per-vport ingress ACL: the flow table, its flow groups, and the rules
 * currently installed for anti-spoofing / untagged-traffic policy.
 * NOTE(review): the closing `};` was lost in extraction and has been
 * restored; the member list matches what is visible in this copy.
 */
struct vport_ingress {
	struct mlx5_flow_table  *acl;
	struct mlx5_flow_group  *allow_untagged_spoofchk_grp;
	struct mlx5_flow_group  *allow_spoofchk_only_grp;
	struct mlx5_flow_group  *allow_untagged_only_grp;
	struct mlx5_flow_group  *drop_grp;
	struct mlx5_flow_handle *allow_rule;
	struct mlx5_flow_handle *drop_rule;
};
/* Per-vport egress ACL: flow table, flow groups, and the currently
 * installed allowed-VLAN / drop rules.
 * NOTE(review): the closing `};` was lost in extraction and has been
 * restored; the member list matches what is visible in this copy.
 */
struct vport_egress {
	struct mlx5_flow_table  *acl;
	struct mlx5_flow_group  *allowed_vlans_grp;
	struct mlx5_flow_group  *drop_grp;
	struct mlx5_flow_handle *allowed_vlan;
	struct mlx5_flow_handle *drop_rule;
};
/*
 * NOTE(review): corrupted region. The body of `struct mlx5_vport_info`
 * (per-vport admin config) was dropped entirely, and the header line of
 * `struct mlx5_vport` — whose members follow below (core dev pointer,
 * UC/MC hash lists, promisc/allmulti rules, change-handler work item,
 * ingress/egress ACLs, info) — is missing, along with both closing
 * braces. Restore both structs from the original file; do not guess at
 * the missing members.
 */
118 struct mlx5_vport_info
{
131 struct mlx5_core_dev
*dev
;
133 struct hlist_head uc_list
[MLX5_L2_ADDR_HASH_SIZE
];
134 struct hlist_head mc_list
[MLX5_L2_ADDR_HASH_SIZE
];
135 struct mlx5_flow_handle
*promisc_rule
;
136 struct mlx5_flow_handle
*allmulti_rule
;
137 struct work_struct vport_change_handler
;
139 struct vport_ingress ingress
;
140 struct vport_egress egress
;
142 struct mlx5_vport_info info
;
/*
 * NOTE(review): truncated — a member between `l2_hash` and `bitmap`
 * (the table size, per the gap in the original line numbering) and the
 * closing `};` are missing. Restore from the original file.
 */
154 struct mlx5_l2_table
{
155 struct hlist_head l2_hash
[MLX5_L2_ADDR_HASH_SIZE
];
157 unsigned long *bitmap
;
/*
 * NOTE(review): corrupted region. `struct mlx5_eswitch_fdb` is missing
 * members before `addr_grp` (the FDB flow-table pointer, per the gap in
 * the original numbering) and its closing brace; upstream these legacy
 * fields and the `struct offloads_fdb` below live inside a union, whose
 * wrapper lines were dropped. The `send_to_vport_rule`/`list` members at
 * the end belong to yet another definition whose header (original line
 * ~185) is missing. Restore all of this from the original file.
 */
160 struct mlx5_eswitch_fdb
{
164 struct mlx5_flow_group
*addr_grp
;
165 struct mlx5_flow_group
*allmulti_grp
;
166 struct mlx5_flow_group
*promisc_grp
;
169 struct offloads_fdb
{
170 struct mlx5_flow_table
*fdb
;
171 struct mlx5_flow_group
*send_to_vport_grp
;
172 struct mlx5_flow_group
*miss_grp
;
173 struct mlx5_flow_handle
*miss_rule
;
174 int vlan_push_pop_refcount
;
186 struct mlx5_flow_handle
*send_to_vport_rule
;
187 struct list_head list
;
/*
 * NOTE(review): truncated. Visible members are the load/unload callbacks,
 * the representor netdev, the vport RX rule and the SQ list; the gaps in
 * the original numbering (195-196, 198, 201+) show that several members
 * and the closing `};` were dropped. Restore from the original file.
 */
190 struct mlx5_eswitch_rep
{
191 int (*load
)(struct mlx5_eswitch
*esw
,
192 struct mlx5_eswitch_rep
*rep
);
193 void (*unload
)(struct mlx5_eswitch
*esw
,
194 struct mlx5_eswitch_rep
*rep
);
197 struct net_device
*netdev
;
199 struct mlx5_flow_handle
*vport_rx_rule
;
200 struct list_head vport_sqs_list
;
/*
 * NOTE(review): truncated — members after the encap hashtable (original
 * lines 211+) and the closing `};` are missing. Restore from the
 * original file.
 */
206 struct mlx5_esw_offload
{
207 struct mlx5_flow_table
*ft_offloads
;
208 struct mlx5_flow_group
*vport_rx_group
;
209 struct mlx5_eswitch_rep
*vport_reps
;
210 DECLARE_HASHTABLE(encap_tbl
, 8);
/*
 * NOTE(review): truncated. Per the gaps in the original numbering,
 * members between `vports` and `state_lock`, between `mc_promisc` and
 * `offloads` (vport counts / mode, going by the function prototypes
 * below — confirm against the original), and the closing `};` are all
 * missing. Restore from the original file.
 */
215 struct mlx5_eswitch
{
216 struct mlx5_core_dev
*dev
;
217 struct mlx5_l2_table l2_table
;
218 struct mlx5_eswitch_fdb fdb_table
;
219 struct hlist_head mc_table
[MLX5_L2_ADDR_HASH_SIZE
];
220 struct workqueue_struct
*work_queue
;
221 struct mlx5_vport
*vports
;
224 /* Synchronize between vport change events
225 * and async SRIOV admin state changes
 */
227 struct mutex state_lock
;
228 struct esw_mc_addr
*mc_promisc
;
235 struct mlx5_esw_offload offloads
;
/* Offloads-mode setup/teardown of the offloads FDB and vport rx objects. */
void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports);
int esw_offloads_init(struct mlx5_eswitch *esw, int nvports);
/* E-Switch lifecycle: init/cleanup, attach/detach to the core device,
 * async vport-event dispatch, and SR-IOV enable/disable.
 */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
void mlx5_eswitch_attach(struct mlx5_eswitch *esw);
void mlx5_eswitch_detach(struct mlx5_eswitch *esw);
void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe);
int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode);
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw);
250 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch
*esw
,
251 int vport
, u8 mac
[ETH_ALEN
]);
252 int mlx5_eswitch_set_vport_state(struct mlx5_eswitch
*esw
,
253 int vport
, int link_state
);
254 int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch
*esw
,
255 int vport
, u16 vlan
, u8 qos
);
256 int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch
*esw
,
257 int vport
, bool spoofchk
);
258 int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch
*esw
,
259 int vport_num
, bool setting
);
260 int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch
*esw
, int vport
,
261 u32 max_rate
, u32 min_rate
);
262 int mlx5_eswitch_get_vport_config(struct mlx5_eswitch
*esw
,
263 int vport
, struct ifla_vf_info
*ivi
);
264 int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch
*esw
,
266 struct ifla_vf_stats
*vf_stats
);
268 struct mlx5_flow_spec
;
269 struct mlx5_esw_flow_attr
;
271 struct mlx5_flow_handle
*
272 mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch
*esw
,
273 struct mlx5_flow_spec
*spec
,
274 struct mlx5_esw_flow_attr
*attr
);
276 mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch
*esw
,
277 struct mlx5_flow_handle
*rule
,
278 struct mlx5_esw_flow_attr
*attr
);
280 struct mlx5_flow_handle
*
281 mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch
*esw
, int vport
, u32 tirn
);
284 SET_VLAN_STRIP
= BIT(0),
285 SET_VLAN_INSERT
= BIT(1)
/* Flow-context action bits for VLAN pop/push on offloaded rules. */
#define MLX5_FLOW_CONTEXT_ACTION_VLAN_POP  0x40
#define MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH 0x80
/*
 * NOTE(review): truncated. Gaps in the original numbering (294-295, 298,
 * 300-301) show that members between `flows` and `tun_info`, a member
 * after `h_dest`, and the closing `};` were dropped. Restore from the
 * original file.
 */
291 struct mlx5_encap_entry
{
292 struct hlist_node encap_hlist
;
293 struct list_head flows
;
296 struct ip_tunnel_info tun_info
;
297 unsigned char h_dest
[ETH_ALEN
]; /* destination eth addr */
299 struct net_device
*out_dev
;
/*
 * NOTE(review): truncated. The gap in the original numbering (306-309)
 * shows members between `out_rep` and `encap` were dropped, along with
 * the closing `};`. Restore from the original file.
 */
303 struct mlx5_esw_flow_attr
{
304 struct mlx5_eswitch_rep
*in_rep
;
305 struct mlx5_eswitch_rep
*out_rep
;
310 struct mlx5_encap_entry
*encap
;
313 int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch
*esw
,
314 struct mlx5_eswitch_rep
*rep
,
315 u16
*sqns_array
, int sqns_num
);
316 void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch
*esw
,
317 struct mlx5_eswitch_rep
*rep
);
319 int mlx5_devlink_eswitch_mode_set(struct devlink
*devlink
, u16 mode
);
320 int mlx5_devlink_eswitch_mode_get(struct devlink
*devlink
, u16
*mode
);
321 int mlx5_devlink_eswitch_inline_mode_set(struct devlink
*devlink
, u8 mode
);
322 int mlx5_devlink_eswitch_inline_mode_get(struct devlink
*devlink
, u8
*mode
);
323 int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch
*esw
, int nvfs
, u8
*mode
);
/* Register/unregister a vport representor, and fetch the uplink rep's
 * netdev.
 * NOTE(review): the `int vport_index` parameter lines of both functions
 * were dropped by extraction (gaps at original lines 325 and 328);
 * restored here — confirm against the definitions in eswitch_offloads.c.
 */
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
				     int vport_index,
				     struct mlx5_eswitch_rep *rep);
void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
				       int vport_index);
struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw);
331 int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch
*esw
,
332 struct mlx5_esw_flow_attr
*attr
);
333 int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch
*esw
,
334 struct mlx5_esw_flow_attr
*attr
);
335 int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch
*esw
,
336 int vport
, u16 vlan
, u8 qos
, u8 set_flags
);
/* Debug-mask bit enabling e-switch messages via mlx5_core_dbg_mask(). */
#define MLX5_DEBUG_ESWITCH_MASK BIT(3)

/* Logging helpers that prefix messages with the device name. */
#define esw_info(dev, format, ...) \
	pr_info("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)

#define esw_warn(dev, format, ...) \
	pr_warn("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)

#define esw_debug(dev, format, ...) \
	mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)
348 #endif /* __MLX5_ESWITCH_H__ */