/*
 * Copyright (c) 2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #ifndef __MLX5_ESWITCH_H__
34 #define __MLX5_ESWITCH_H__
36 #include <linux/if_ether.h>
37 #include <linux/if_link.h>
38 #include <net/devlink.h>
39 #include <linux/mlx5/device.h>
41 #define MLX5_MAX_UC_PER_VPORT(dev) \
42 (1 << MLX5_CAP_GEN(dev, log_max_current_uc_list))
44 #define MLX5_MAX_MC_PER_VPORT(dev) \
45 (1 << MLX5_CAP_GEN(dev, log_max_current_mc_list))
/* 256-bucket L2 hash, keyed on the last byte of the MAC address */
#define MLX5_L2_ADDR_HASH_SIZE (BIT(BITS_PER_BYTE))
#define MLX5_L2_ADDR_HASH(addr) (addr[5])

/* Pseudo vport number used for FDB rules that steer to the uplink */
#define FDB_UPLINK_VPORT 0xffff

#define MLX5_MIN_BW_SHARE 1

/* Map a rate to a TSAR bw_share value, clamped to [MLX5_MIN_BW_SHARE, limit] */
#define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \
	min_t(u32, max_t(u32, (rate) / (divider), MLX5_MIN_BW_SHARE), limit)
57 /* L2 -mac address based- hash helpers */
59 struct hlist_node hlist
;
/* Iterate (deletion-safe) over every node of an MLX5_L2_ADDR_HASH_SIZE
 * bucket array; 'i' is the bucket index, 'tmp' the safe-iteration cursor.
 */
#define for_each_l2hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5_L2_ADDR_HASH_SIZE; i++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
/* Look up 'mac' in an l2addr hash; evaluates to a 'type *' or NULL.
 * NOTE(review): the local declarations and the tail of this macro were
 * dropped from this copy; restored from the canonical driver source —
 * confirm against it.
 */
#define l2addr_hash_find(hash, mac, type) ({                \
	int ix = MLX5_L2_ADDR_HASH(mac);                    \
	bool found = false;                                 \
	type *ptr = NULL;                                   \
							    \
	hlist_for_each_entry(ptr, &hash[ix], node.hlist)    \
		if (ether_addr_equal(ptr->node.addr, mac)) {\
			found = true;                       \
			break;                              \
		}                                           \
	if (!found)                                         \
		ptr = NULL;                                 \
	ptr;                                                \
})
/* Allocate a 'type' node, copy 'mac' into it and insert it in the hash;
 * evaluates to the new node, or NULL on allocation failure.
 * NOTE(review): the NULL-check and the tail of this macro were dropped
 * from this copy; restored from the canonical driver source.
 */
#define l2addr_hash_add(hash, mac, type, gfp) ({            \
	int ix = MLX5_L2_ADDR_HASH(mac);                    \
	type *ptr = NULL;                                   \
							    \
	ptr = kzalloc(sizeof(type), gfp);                   \
	if (ptr) {                                          \
		ether_addr_copy(ptr->node.addr, mac);       \
		hlist_add_head(&ptr->node.hlist, &hash[ix]);\
	}                                                   \
	ptr;                                                \
})
/* Unlink a node from its hash bucket and free it.
 * NOTE(review): the kfree() tail was dropped from this copy; restored.
 */
#define l2addr_hash_del(ptr) ({                             \
	hlist_del(&ptr->node.hlist);                        \
	kfree(ptr);                                         \
})
/* Per-vport ingress ACL: the flow table, its match groups, and the
 * currently installed allow/drop rules (anti-spoof / untagged-only
 * enforcement). All members visible in this copy; only the closing
 * brace was dropped.
 */
struct vport_ingress {
	struct mlx5_flow_table   *acl;
	struct mlx5_flow_group   *allow_untagged_spoofchk_grp;
	struct mlx5_flow_group   *allow_spoofchk_only_grp;
	struct mlx5_flow_group   *allow_untagged_only_grp;
	struct mlx5_flow_group   *drop_grp;
	struct mlx5_flow_handle  *allow_rule;
	struct mlx5_flow_handle  *drop_rule;
};
/* Per-vport egress ACL: an allowed-VLAN rule plus a catch-all drop.
 * All members visible in this copy; only the closing brace was dropped.
 */
struct vport_egress {
	struct mlx5_flow_table   *acl;
	struct mlx5_flow_group   *allowed_vlans_grp;
	struct mlx5_flow_group   *drop_grp;
	struct mlx5_flow_handle  *allowed_vlan;
	struct mlx5_flow_handle  *drop_rule;
};
117 struct mlx5_vport_info
{
130 struct mlx5_core_dev
*dev
;
132 struct hlist_head uc_list
[MLX5_L2_ADDR_HASH_SIZE
];
133 struct hlist_head mc_list
[MLX5_L2_ADDR_HASH_SIZE
];
134 struct mlx5_flow_handle
*promisc_rule
;
135 struct mlx5_flow_handle
*allmulti_rule
;
136 struct work_struct vport_change_handler
;
138 struct vport_ingress ingress
;
139 struct vport_egress egress
;
141 struct mlx5_vport_info info
;
153 struct mlx5_l2_table
{
154 struct hlist_head l2_hash
[MLX5_L2_ADDR_HASH_SIZE
];
156 unsigned long *bitmap
;
/* FDB state, one union variant per e-switch mode (legacy vs. offloads).
 * NOTE(review): the leading 'void *fdb' member, the union and the
 * legacy-struct wrappers (plus all closing braces) were dropped from
 * this copy; restored from the canonical driver source — confirm.
 */
struct mlx5_eswitch_fdb {
	void *fdb;
	union {
		struct legacy_fdb {
			struct mlx5_flow_group *addr_grp;
			struct mlx5_flow_group *allmulti_grp;
			struct mlx5_flow_group *promisc_grp;
		} legacy;

		struct offloads_fdb {
			struct mlx5_flow_table *fdb;
			struct mlx5_flow_group *send_to_vport_grp;
			struct mlx5_flow_group *miss_grp;
			struct mlx5_flow_handle *miss_rule;
			int vlan_push_pop_refcount;
		} offloads;
	};
};
185 struct mlx5_flow_handle
*send_to_vport_rule
;
186 struct list_head list
;
189 struct mlx5_eswitch_rep
{
190 int (*load
)(struct mlx5_eswitch
*esw
,
191 struct mlx5_eswitch_rep
*rep
);
192 void (*unload
)(struct mlx5_eswitch
*esw
,
193 struct mlx5_eswitch_rep
*rep
);
196 struct net_device
*netdev
;
198 struct mlx5_flow_handle
*vport_rx_rule
;
199 struct list_head vport_sqs_list
;
205 struct mlx5_esw_offload
{
206 struct mlx5_flow_table
*ft_offloads
;
207 struct mlx5_flow_group
*vport_rx_group
;
208 struct mlx5_eswitch_rep
*vport_reps
;
209 DECLARE_HASHTABLE(encap_tbl
, 8);
210 DECLARE_HASHTABLE(mod_hdr_tbl
, 8);
216 /* E-Switch MC FDB table hash node */
217 struct esw_mc_addr
{ /* SRIOV only */
218 struct l2addr_node node
;
219 struct mlx5_flow_handle
*uplink_rule
; /* Forward to uplink rule */
223 struct mlx5_eswitch
{
224 struct mlx5_core_dev
*dev
;
225 struct mlx5_l2_table l2_table
;
226 struct mlx5_eswitch_fdb fdb_table
;
227 struct hlist_head mc_table
[MLX5_L2_ADDR_HASH_SIZE
];
228 struct workqueue_struct
*work_queue
;
229 struct mlx5_vport
*vports
;
232 /* Synchronize between vport change events
233 * and async SRIOV admin state changes
235 struct mutex state_lock
;
236 struct esw_mc_addr mc_promisc
;
243 struct mlx5_esw_offload offloads
;
247 void esw_offloads_cleanup(struct mlx5_eswitch
*esw
, int nvports
);
248 int esw_offloads_init(struct mlx5_eswitch
*esw
, int nvports
);
251 int mlx5_eswitch_init(struct mlx5_core_dev
*dev
);
252 void mlx5_eswitch_cleanup(struct mlx5_eswitch
*esw
);
253 void mlx5_eswitch_attach(struct mlx5_eswitch
*esw
);
254 void mlx5_eswitch_detach(struct mlx5_eswitch
*esw
);
255 void mlx5_eswitch_vport_event(struct mlx5_eswitch
*esw
, struct mlx5_eqe
*eqe
);
256 int mlx5_eswitch_enable_sriov(struct mlx5_eswitch
*esw
, int nvfs
, int mode
);
257 void mlx5_eswitch_disable_sriov(struct mlx5_eswitch
*esw
);
258 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch
*esw
,
259 int vport
, u8 mac
[ETH_ALEN
]);
260 int mlx5_eswitch_set_vport_state(struct mlx5_eswitch
*esw
,
261 int vport
, int link_state
);
262 int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch
*esw
,
263 int vport
, u16 vlan
, u8 qos
);
264 int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch
*esw
,
265 int vport
, bool spoofchk
);
266 int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch
*esw
,
267 int vport_num
, bool setting
);
268 int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch
*esw
, int vport
,
269 u32 max_rate
, u32 min_rate
);
270 int mlx5_eswitch_get_vport_config(struct mlx5_eswitch
*esw
,
271 int vport
, struct ifla_vf_info
*ivi
);
272 int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch
*esw
,
274 struct ifla_vf_stats
*vf_stats
);
276 struct mlx5_flow_spec
;
277 struct mlx5_esw_flow_attr
;
279 struct mlx5_flow_handle
*
280 mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch
*esw
,
281 struct mlx5_flow_spec
*spec
,
282 struct mlx5_esw_flow_attr
*attr
);
284 mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch
*esw
,
285 struct mlx5_flow_handle
*rule
,
286 struct mlx5_esw_flow_attr
*attr
);
288 struct mlx5_flow_handle
*
289 mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch
*esw
, int vport
, u32 tirn
);
292 SET_VLAN_STRIP
= BIT(0),
293 SET_VLAN_INSERT
= BIT(1)
296 #define MLX5_FLOW_CONTEXT_ACTION_VLAN_POP 0x4000
297 #define MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH 0x8000
299 struct mlx5_esw_flow_attr
{
300 struct mlx5_eswitch_rep
*in_rep
;
301 struct mlx5_eswitch_rep
*out_rep
;
308 struct mlx5e_tc_flow_parse_attr
*parse_attr
;
311 int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch
*esw
,
312 struct mlx5_eswitch_rep
*rep
,
313 u16
*sqns_array
, int sqns_num
);
314 void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch
*esw
,
315 struct mlx5_eswitch_rep
*rep
);
317 int mlx5_devlink_eswitch_mode_set(struct devlink
*devlink
, u16 mode
);
318 int mlx5_devlink_eswitch_mode_get(struct devlink
*devlink
, u16
*mode
);
319 int mlx5_devlink_eswitch_inline_mode_set(struct devlink
*devlink
, u8 mode
);
320 int mlx5_devlink_eswitch_inline_mode_get(struct devlink
*devlink
, u8
*mode
);
321 int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch
*esw
, int nvfs
, u8
*mode
);
322 int mlx5_devlink_eswitch_encap_mode_set(struct devlink
*devlink
, u8 encap
);
323 int mlx5_devlink_eswitch_encap_mode_get(struct devlink
*devlink
, u8
*encap
);
324 void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch
*esw
,
326 struct mlx5_eswitch_rep
*rep
);
327 void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch
*esw
,
329 struct net_device
*mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch
*esw
);
331 int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch
*esw
,
332 struct mlx5_esw_flow_attr
*attr
);
333 int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch
*esw
,
334 struct mlx5_esw_flow_attr
*attr
);
335 int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch
*esw
,
336 int vport
, u16 vlan
, u8 qos
, u8 set_flags
);
338 #define MLX5_DEBUG_ESWITCH_MASK BIT(3)
340 #define esw_info(dev, format, ...) \
341 pr_info("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)
343 #define esw_warn(dev, format, ...) \
344 pr_warn("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)
346 #define esw_debug(dev, format, ...) \
347 mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)
348 #endif /* __MLX5_ESWITCH_H__ */