/*
 * Copyright (c) 2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __MLX5_ESWITCH_H__
#define __MLX5_ESWITCH_H__

#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/xarray.h>
#include <net/devlink.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "lib/fs_chains.h"
#include "en/tc/sample.h"

enum mlx5_mapped_obj_type {
	MLX5_MAPPED_OBJ_CHAIN,
	MLX5_MAPPED_OBJ_SAMPLE,
};

struct mlx5_mapped_obj {
	enum mlx5_mapped_obj_type type;
	union {
		u32 chain;
		struct {
			u32 group_id;
			u32 rate;
			u32 trunc_size;
		} sample;
	};
};

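/* Illustrative sketch (not part of this header): objects of this type live
 * in the offloads reg_c0 mapping pool and are looked up by the id a packet
 * carries in reg_c0. The restore_chain() helper below is hypothetical;
 * mapping_find() is assumed to be the pool lookup from lib/mapping.h.
 *
 *	struct mlx5_mapped_obj obj;
 *
 *	if (!mapping_find(esw->offloads.reg_c0_obj_pool, id, &obj) &&
 *	    obj.type == MLX5_MAPPED_OBJ_CHAIN)
 *		restore_chain(obj.chain);	// hypothetical helper
 */
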
#ifdef CONFIG_MLX5_ESWITCH

#define ESW_OFFLOADS_DEFAULT_NUM_GROUPS 15

#define MLX5_MAX_UC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_uc_list))

#define MLX5_MAX_MC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_mc_list))

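/* Illustrative: the per-vport address limits above expand from device
 * capability fields, so e.g. log_max_current_uc_list == 7 yields 128
 * unicast MACs per vport. A hypothetical caller-side bound check:
 *
 *	if (vport_uc_count >= MLX5_MAX_UC_PER_VPORT(esw->dev))
 *		return -ENOSPC;
 */
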
#define mlx5_esw_has_fwd_fdb(dev) \
	MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table)

#define esw_chains(esw) \
	((esw)->fdb_table.offloads.esw_chains_priv)

enum {
	MAPPING_TYPE_CHAIN,
	MAPPING_TYPE_TUNNEL,
	MAPPING_TYPE_TUNNEL_ENC_OPTS,
	MAPPING_TYPE_LABELS,
	MAPPING_TYPE_ZONE,
};

struct vport_ingress {
	struct mlx5_flow_table *acl;
	struct mlx5_flow_handle *allow_rule;
	struct {
		struct mlx5_flow_group *allow_spoofchk_only_grp;
		struct mlx5_flow_group *allow_untagged_spoofchk_grp;
		struct mlx5_flow_group *allow_untagged_only_grp;
		struct mlx5_flow_group *drop_grp;
		struct mlx5_flow_handle *drop_rule;
		struct mlx5_fc *drop_counter;
	} legacy;
	struct {
		/* Optional group to add an FTE to do internal priority
		 * tagging on ingress packets.
		 */
		struct mlx5_flow_group *metadata_prio_tag_grp;
		/* Group to add default match-all FTE entry to tag ingress
		 * packet with metadata.
		 */
		struct mlx5_flow_group *metadata_allmatch_grp;
		struct mlx5_modify_hdr *modify_metadata;
		struct mlx5_flow_handle *modify_metadata_rule;
	} offloads;
};

struct vport_egress {
	struct mlx5_flow_table *acl;
	struct mlx5_flow_handle *allowed_vlan;
	struct mlx5_flow_group *vlan_grp;
	union {
		struct {
			struct mlx5_flow_group *drop_grp;
			struct mlx5_flow_handle *drop_rule;
			struct mlx5_fc *drop_counter;
		} legacy;
		struct {
			struct mlx5_flow_group *fwd_grp;
			struct mlx5_flow_handle *fwd_rule;
			struct mlx5_flow_handle *bounce_rule;
			struct mlx5_flow_group *bounce_grp;
		} offloads;
	};
};

struct mlx5_vport_drop_stats {
	u64 rx_dropped;
	u64 tx_dropped;
};

struct mlx5_vport_info {
	u8                      mac[ETH_ALEN];
	u16                     vlan;
	u64                     node_guid;
	int                     link_state;
	u8                      qos;
	u8                      spoofchk: 1;
	u8                      trusted: 1;
};

/* Vport context events */
enum mlx5_eswitch_vport_event {
	MLX5_VPORT_UC_ADDR_CHANGE = BIT(0),
	MLX5_VPORT_MC_ADDR_CHANGE = BIT(1),
	MLX5_VPORT_PROMISC_CHANGE = BIT(3),
};

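/* Illustrative: these bits are OR-ed into the enabled_events mask that is
 * handed to the vport enable helpers declared further below (hypothetical
 * call site):
 *
 *	enum mlx5_eswitch_vport_event events = MLX5_VPORT_UC_ADDR_CHANGE |
 *					       MLX5_VPORT_MC_ADDR_CHANGE |
 *					       MLX5_VPORT_PROMISC_CHANGE;
 *	err = mlx5_esw_vport_enable(esw, vport_num, events);
 */
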
struct mlx5_vport {
	struct mlx5_core_dev    *dev;
	struct hlist_head       uc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct hlist_head       mc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct mlx5_flow_handle *promisc_rule;
	struct mlx5_flow_handle *allmulti_rule;
	struct work_struct      vport_change_handler;

	struct vport_ingress    ingress;
	struct vport_egress     egress;
	u32                     default_metadata;
	u32                     metadata;

	struct mlx5_vport_info  info;

	struct {
		bool            enabled;
		u32             esw_tsar_ix;
		u32             bw_share;
		u32             min_rate;
		u32             max_rate;
		struct mlx5_esw_rate_group *group;
	} qos;

	u16 vport;
	bool                    enabled;
	enum mlx5_eswitch_vport_event enabled_events;
	int                     index;
	struct devlink_port     *dl_port;
};

struct mlx5_esw_indir_table;

struct mlx5_eswitch_fdb {
	union {
		struct legacy_fdb {
			struct mlx5_flow_table *fdb;
			struct mlx5_flow_group *addr_grp;
			struct mlx5_flow_group *allmulti_grp;
			struct mlx5_flow_group *promisc_grp;
			struct mlx5_flow_table *vepa_fdb;
			struct mlx5_flow_handle *vepa_uplink_rule;
			struct mlx5_flow_handle *vepa_star_rule;
		} legacy;

		struct offloads_fdb {
			struct mlx5_flow_namespace *ns;
			struct mlx5_flow_table *tc_miss_table;
			struct mlx5_flow_table *slow_fdb;
			struct mlx5_flow_group *send_to_vport_grp;
			struct mlx5_flow_group *send_to_vport_meta_grp;
			struct mlx5_flow_group *peer_miss_grp;
			struct mlx5_flow_handle **peer_miss_rules;
			struct mlx5_flow_group *miss_grp;
			struct mlx5_flow_handle **send_to_vport_meta_rules;
			struct mlx5_flow_handle *miss_rule_uni;
			struct mlx5_flow_handle *miss_rule_multi;
			int vlan_push_pop_refcount;

			struct mlx5_fs_chains *esw_chains_priv;
			struct {
				DECLARE_HASHTABLE(table, 8);
				/* Protects vports.table */
				struct mutex lock;
			} vports;

			struct mlx5_esw_indir_table *indir;

		} offloads;
	};
	u32 flags;
};

struct mlx5_esw_offload {
	struct mlx5_flow_table *ft_offloads_restore;
	struct mlx5_flow_group *restore_group;
	struct mlx5_modify_hdr *restore_copy_hdr_id;
	struct mapping_ctx *reg_c0_obj_pool;

	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_group *vport_rx_group;
	struct xarray vport_reps;
	struct list_head peer_flows;
	struct mutex peer_mutex;
	struct mutex encap_tbl_lock; /* protects encap_tbl */
	DECLARE_HASHTABLE(encap_tbl, 8);
	struct mutex decap_tbl_lock; /* protects decap_tbl */
	DECLARE_HASHTABLE(decap_tbl, 8);
	struct mod_hdr_tbl mod_hdr;
	DECLARE_HASHTABLE(termtbl_tbl, 8);
	struct mutex termtbl_mutex; /* protects termtbl hash */
	struct xarray vhca_map;
	const struct mlx5_eswitch_rep_ops *rep_ops[NUM_REP_TYPES];
	u8 inline_mode;
	atomic64_t num_flows;
	enum devlink_eswitch_encap_mode encap;
	struct ida vport_metadata_ida;
	unsigned int host_number; /* ECPF supports one external host */
};

/* E-Switch MC FDB table hash node */
struct esw_mc_addr { /* SRIOV only */
	struct l2addr_node     node;
	struct mlx5_flow_handle *uplink_rule; /* Forward to uplink rule */
	u32                    refcnt;
};

struct mlx5_host_work {
	struct work_struct	work;
	struct mlx5_eswitch	*esw;
};

struct mlx5_esw_functions {
	struct mlx5_nb		nb;
	u16			num_vfs;
};

enum {
	MLX5_ESWITCH_VPORT_MATCH_METADATA = BIT(0),
	MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED = BIT(1),
};

struct mlx5_esw_bridge_offloads;

struct mlx5_eswitch {
	struct mlx5_core_dev    *dev;
	struct mlx5_nb          nb;
	struct mlx5_eswitch_fdb fdb_table;
	/* legacy data structures */
	struct hlist_head       mc_table[MLX5_L2_ADDR_HASH_SIZE];
	struct esw_mc_addr mc_promisc;
	/* end of legacy */
	struct workqueue_struct *work_queue;
	struct xarray vports;
	u32 flags;
	int                     total_vports;
	int                     enabled_vports;
	/* Synchronize between vport change events
	 * and async SRIOV admin state changes
	 */
	struct mutex            state_lock;

	/* Protects eswitch mode change that occurs via one or more
	 * user commands, i.e. sriov state change, devlink commands.
	 */
	struct rw_semaphore mode_lock;
	atomic64_t user_count;

	struct {
		bool            enabled;
		u32             root_tsar_ix;
		struct mlx5_esw_rate_group *group0;
		struct list_head groups; /* Protected by esw->state_lock */
	} qos;

	struct mlx5_esw_bridge_offloads *br_offloads;
	struct mlx5_esw_offload offloads;
	int                     mode;
	u16                     manager_vport;
	u16                     first_host_vport;
	struct mlx5_esw_functions esw_funcs;
	struct {
		u32             large_group_num;
	} params;
	struct blocking_notifier_head n_head;
	struct lock_class_key mode_lock_key;
};

void esw_offloads_disable(struct mlx5_eswitch *esw);
int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);

bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw);
int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable);
u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw);
void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata);

int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
				   u32 rate_mbps);

int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);

#define MLX5_ESWITCH_IGNORE_NUM_VFS (-1)
int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs);
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf);
void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf);

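/* Illustrative lifecycle sketch for the enable/disable API above: SR-IOV
 * paths pass the real VF count, while callers that must not change it pass
 * MLX5_ESWITCH_IGNORE_NUM_VFS (call site is hypothetical):
 *
 *	err = mlx5_eswitch_enable(esw, num_vfs);
 *	if (err)
 *		return err;
 *	...
 *	mlx5_eswitch_disable(esw, true);	// clear_vf: drop VF config
 */
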
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
			       u16 vport, const u8 *mac);
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
				 u16 vport, int link_state);
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				u16 vport, u16 vlan, u8 qos);
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
				    u16 vport, bool spoofchk);
int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
				 u16 vport_num, bool setting);
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
				u32 max_rate, u32 min_rate);
int mlx5_esw_qos_vport_update_group(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport,
				    struct mlx5_esw_rate_group *group,
				    struct netlink_ext_ack *extack);
int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting);
int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting);
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
				  u16 vport, struct ifla_vf_info *ivi);
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
				 u16 vport,
				 struct ifla_vf_stats *vf_stats);
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);

int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
					  bool other_vport, void *in);

struct mlx5_flow_spec;
struct mlx5_esw_flow_attr;
struct mlx5_termtbl_handle;

bool
mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
			      struct mlx5_flow_attr *attr,
			      struct mlx5_flow_act *flow_act,
			      struct mlx5_flow_spec *spec);

struct mlx5_flow_handle *
mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
			      struct mlx5_flow_table *ft,
			      struct mlx5_flow_spec *spec,
			      struct mlx5_esw_flow_attr *attr,
			      struct mlx5_flow_act *flow_act,
			      struct mlx5_flow_destination *dest,
			      int num_dest);

void
mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
			 struct mlx5_termtbl_handle *tt);

void
mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec);

struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_flow_attr *attr);
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_flow_attr *attr);

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest);

enum {
	SET_VLAN_STRIP	= BIT(0),
	SET_VLAN_INSERT	= BIT(1)
};

enum mlx5_flow_match_level {
	MLX5_MATCH_NONE	= MLX5_INLINE_MODE_NONE,
	MLX5_MATCH_L2	= MLX5_INLINE_MODE_L2,
	MLX5_MATCH_L3	= MLX5_INLINE_MODE_IP,
	MLX5_MATCH_L4	= MLX5_INLINE_MODE_TCP_UDP,
};

/* current maximum for flow based vport multicasting */
#define MLX5_MAX_FLOW_FWD_VPORTS 2

enum {
	MLX5_ESW_DEST_ENCAP                      = BIT(0),
	MLX5_ESW_DEST_ENCAP_VALID                = BIT(1),
	MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE = BIT(2),
};

enum {
	MLX5_ESW_ATTR_FLAG_VLAN_HANDLED = BIT(0),
	MLX5_ESW_ATTR_FLAG_SLOW_PATH    = BIT(1),
	MLX5_ESW_ATTR_FLAG_NO_IN_PORT   = BIT(2),
	MLX5_ESW_ATTR_FLAG_SRC_REWRITE  = BIT(3),
	MLX5_ESW_ATTR_FLAG_SAMPLE       = BIT(4),
};

struct mlx5_esw_flow_attr {
	struct mlx5_eswitch_rep *in_rep;
	struct mlx5_core_dev	*in_mdev;
	struct mlx5_core_dev    *counter_dev;

	int split_count;
	int out_count;

	__be16	vlan_proto[MLX5_FS_VLAN_DEPTH];
	u16	vlan_vid[MLX5_FS_VLAN_DEPTH];
	u8	vlan_prio[MLX5_FS_VLAN_DEPTH];
	u8	total_vlan;
	struct {
		u32 flags;
		struct mlx5_eswitch_rep *rep;
		struct mlx5_pkt_reformat *pkt_reformat;
		struct mlx5_core_dev *mdev;
		struct mlx5_termtbl_handle *termtbl;
		int src_port_rewrite_act_id;
	} dests[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5_rx_tun_attr *rx_tun_attr;
	struct mlx5_pkt_reformat *decap_pkt_reformat;
};

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
					enum devlink_eswitch_encap_mode *encap);
int mlx5_devlink_port_function_hw_addr_get(struct devlink_port *port,
					   u8 *hw_addr, int *hw_addr_len,
					   struct netlink_ext_ack *extack);
int mlx5_devlink_port_function_hw_addr_set(struct devlink_port *port,
					   const u8 *hw_addr, int hw_addr_len,
					   struct netlink_ext_ack *extack);

void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);

int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_flow_attr *attr);
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_flow_attr *attr);
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				  u16 vport, u16 vlan, u8 qos, u8 set_flags);

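/* Illustrative: set_flags is a mask of the SET_VLAN_* bits defined above,
 * e.g. inserting VLAN 10/prio 0 in one direction while stripping in the
 * other (call site and values are hypothetical):
 *
 *	err = __mlx5_eswitch_set_vport_vlan(esw, vport_num, 10, 0,
 *					    SET_VLAN_STRIP | SET_VLAN_INSERT);
 */
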
static inline bool mlx5_esw_qos_enabled(struct mlx5_eswitch *esw)
{
	return esw->qos.enabled;
}

static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
						       u8 vlan_depth)
{
	bool ret = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) &&
		   MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan);

	if (vlan_depth == 1)
		return ret;

	return ret && MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan_2) &&
	       MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan_2);
}

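/* Illustrative: callers gate double-VLAN (QinQ) push/pop support on the
 * helper above, e.g. (hypothetical):
 *
 *	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 2))
 *		return -EOPNOTSUPP;
 */
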
bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0,
			 struct mlx5_core_dev *dev1);
bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
			       struct mlx5_core_dev *dev1);

const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev);

#define MLX5_DEBUG_ESWITCH_MASK BIT(3)

#define esw_info(__dev, format, ...)			\
	dev_info((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_warn(__dev, format, ...)			\
	dev_warn((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_debug(dev, format, ...)				\
	mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)

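/* Illustrative usage of the logging helpers above (messages are
 * hypothetical):
 *
 *	esw_info(esw->dev, "vport(%d) enabled\n", vport_num);
 *	esw_warn(esw->dev, "vport(%d) enable failed, err(%d)\n", vport_num, err);
 */
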
static inline bool mlx5_esw_allowed(const struct mlx5_eswitch *esw)
{
	return esw && MLX5_ESWITCH_MANAGER(esw->dev);
}

/* The returned number is valid only when the dev is eswitch manager. */
static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev) ?
		MLX5_VPORT_ECPF : MLX5_VPORT_PF;
}

static inline bool
mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
	return esw->manager_vport == vport_num;
}

static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev) ?
		MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF;
}

static inline bool mlx5_eswitch_is_funcs_handler(const struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev);
}

static inline unsigned int
mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
				     u16 vport_num)
{
	return (MLX5_CAP_GEN(dev, vhca_id) << 16) | vport_num;
}

static inline u16
mlx5_esw_devlink_port_index_to_vport_num(unsigned int dl_port_index)
{
	return dl_port_index & 0xffff;
}

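/* Illustrative round trip of the two helpers above: the devlink port index
 * packs vhca_id into the high 16 bits and the vport number into the low 16
 * bits, so the vport number always survives encode/decode:
 *
 *	unsigned int idx = mlx5_esw_vport_to_devlink_port_index(dev, vport_num);
 *	WARN_ON(mlx5_esw_devlink_port_index_to_vport_num(idx) != vport_num);
 */
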
/* TODO: This mlx5e_tc function shouldn't be called by eswitch */
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);

/* Each mark identifies eswitch vport type.
 * MLX5_ESW_VPT_HOST_FN is used to identify both PF and VF ports using
 * a single mark.
 * MLX5_ESW_VPT_VF identifies a SRIOV VF vport.
 * MLX5_ESW_VPT_SF identifies SF vport.
 */
#define MLX5_ESW_VPT_HOST_FN XA_MARK_0
#define MLX5_ESW_VPT_VF XA_MARK_1
#define MLX5_ESW_VPT_SF XA_MARK_2

/* The vport iterator is valid only after vports are initialized in
 * mlx5_eswitch_init. Borrowed the idea from xa_for_each_marked() but with
 * support for desired last element.
 */
#define mlx5_esw_for_each_vport(esw, index, vport) \
	xa_for_each(&((esw)->vports), index, vport)

#define mlx5_esw_for_each_entry_marked(xa, index, entry, last, filter)	\
	for (index = 0, entry = xa_find(xa, &index, last, filter); \
	     entry; entry = xa_find_after(xa, &index, last, filter))

#define mlx5_esw_for_each_vport_marked(esw, index, vport, last, filter)	\
	mlx5_esw_for_each_entry_marked(&((esw)->vports), index, vport, last, filter)

#define mlx5_esw_for_each_vf_vport(esw, index, vport, last)	\
	mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_VF)

#define mlx5_esw_for_each_host_func_vport(esw, index, vport, last)	\
	mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_HOST_FN)

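/* Illustrative: walk the VF vports up to a desired last index using the
 * marked iterator above (loop body is hypothetical):
 *
 *	unsigned long index;
 *	struct mlx5_vport *vport;
 *
 *	mlx5_esw_for_each_vf_vport(esw, index, vport, num_vfs) {
 *		if (!vport->enabled)
 *			continue;
 *	}
 */
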
struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink);
struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);

bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num);
bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);

int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
				 enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);

int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
			  enum mlx5_eswitch_vport_event enabled_events);
void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num);

int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport);
void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport);

struct esw_vport_tbl_namespace {
	int		max_fte;
	int		max_num_groups;
	u32		flags;
};

struct mlx5_vport_tbl_attr {
	u16 chain;
	u16 prio;
	u16 vport;
	const struct esw_vport_tbl_namespace *vport_ns;
};

struct mlx5_flow_table *
mlx5_esw_vporttbl_get(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr);
void
mlx5_esw_vporttbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr);

struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag);

int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num);
void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
			    enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
				enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs);

int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
				      u16 vport_num, u32 controller, u32 sfnum);
void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
				      u16 vport_num, u32 controller, u32 sfnum);
void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id);

int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num);

/**
 * mlx5_esw_event_info - Indicates eswitch mode changed/changing.
 *
 * @new_mode: New mode of eswitch.
 */
struct mlx5_esw_event_info {
	u16 new_mode;
};

int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *n);
void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *n);

bool mlx5_esw_hold(struct mlx5_core_dev *dev);
void mlx5_esw_release(struct mlx5_core_dev *dev);
void mlx5_esw_get(struct mlx5_core_dev *dev);
void mlx5_esw_put(struct mlx5_core_dev *dev);
int mlx5_esw_try_lock(struct mlx5_eswitch *esw);
void mlx5_esw_unlock(struct mlx5_eswitch *esw);
void mlx5_esw_lock(struct mlx5_eswitch *esw);

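/* Illustrative pairing for the helpers above (sketch, not a real call
 * site): a devlink command path takes the mode lock so it can work against
 * a stable eswitch mode, then unlocks when done:
 *
 *	err = mlx5_esw_try_lock(esw);
 *	if (err < 0)
 *		return err;	// eswitch is busy with a mode change
 *	// on success, err is assumed to hold the current eswitch mode
 *	...
 *	mlx5_esw_unlock(esw);
 */
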
void esw_vport_change_handle_locked(struct mlx5_vport *vport);

bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller);

int mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw,
					    struct mlx5_eswitch *slave_esw);
void mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw,
					      struct mlx5_eswitch *slave_esw);
int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw);

#else  /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int  mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void mlx5_esw_unlock(struct mlx5_eswitch *esw) { return; }
static inline void mlx5_esw_lock(struct mlx5_eswitch *esw) { return; }

static inline struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline unsigned int
mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
				     u16 vport_num)
{
	return vport_num;
}

static inline int
mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw,
					struct mlx5_eswitch *slave_esw)
{
	return 0;
}

static inline void
mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw,
					 struct mlx5_eswitch *slave_esw) {}

static inline int
mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
{
	return 0;
}
#endif /* CONFIG_MLX5_ESWITCH */

#endif /* __MLX5_ESWITCH_H__ */