/*
 * Copyright (c) 2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #ifndef __MLX5_ESWITCH_H__
34 #define __MLX5_ESWITCH_H__
36 #include <linux/if_ether.h>
37 #include <linux/if_link.h>
38 #include <linux/atomic.h>
39 #include <linux/xarray.h>
40 #include <net/devlink.h>
41 #include <linux/mlx5/device.h>
42 #include <linux/mlx5/eswitch.h>
43 #include <linux/mlx5/vport.h>
44 #include <linux/mlx5/fs.h>
46 #include "lib/fs_chains.h"
49 #include "esw/sample.h"
51 enum mlx5_mapped_obj_type
{
52 MLX5_MAPPED_OBJ_CHAIN
,
53 MLX5_MAPPED_OBJ_SAMPLE
,
56 struct mlx5_mapped_obj
{
57 enum mlx5_mapped_obj_type type
;
68 #ifdef CONFIG_MLX5_ESWITCH
/* Default number of flow groups used by the eswitch offloads FDB. */
#define ESW_OFFLOADS_DEFAULT_NUM_GROUPS 15

/* Max unicast L2 addresses per vport, read from device capabilities. */
#define MLX5_MAX_UC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_uc_list))

/* Max multicast L2 addresses per vport, read from device capabilities. */
#define MLX5_MAX_MC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_mc_list))

/* Smallest bandwidth-share value assignable to a vport. */
#define MLX5_MIN_BW_SHARE 1

/* Convert a rate into a bandwidth share: scale by @divider, then clamp
 * to the range [MLX5_MIN_BW_SHARE, limit].
 */
#define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \
	min_t(u32, max_t(u32, (rate) / (divider), MLX5_MIN_BW_SHARE), limit)

/* True when the FDB supports multi-path forwarding tables. */
#define mlx5_esw_has_fwd_fdb(dev) \
	MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table)

/* Shorthand accessor for the offloads FDB chains private data. */
#define esw_chains(esw) \
	((esw)->fdb_table.offloads.esw_chains_priv)
89 struct vport_ingress
{
90 struct mlx5_flow_table
*acl
;
91 struct mlx5_flow_handle
*allow_rule
;
93 struct mlx5_flow_group
*allow_spoofchk_only_grp
;
94 struct mlx5_flow_group
*allow_untagged_spoofchk_grp
;
95 struct mlx5_flow_group
*allow_untagged_only_grp
;
96 struct mlx5_flow_group
*drop_grp
;
97 struct mlx5_flow_handle
*drop_rule
;
98 struct mlx5_fc
*drop_counter
;
101 /* Optional group to add an FTE to do internal priority
102 * tagging on ingress packets.
104 struct mlx5_flow_group
*metadata_prio_tag_grp
;
105 /* Group to add default match-all FTE entry to tag ingress
106 * packet with metadata.
108 struct mlx5_flow_group
*metadata_allmatch_grp
;
109 struct mlx5_modify_hdr
*modify_metadata
;
110 struct mlx5_flow_handle
*modify_metadata_rule
;
114 struct vport_egress
{
115 struct mlx5_flow_table
*acl
;
116 struct mlx5_flow_handle
*allowed_vlan
;
117 struct mlx5_flow_group
*vlan_grp
;
120 struct mlx5_flow_group
*drop_grp
;
121 struct mlx5_flow_handle
*drop_rule
;
122 struct mlx5_fc
*drop_counter
;
125 struct mlx5_flow_group
*fwd_grp
;
126 struct mlx5_flow_handle
*fwd_rule
;
131 struct mlx5_vport_drop_stats
{
136 struct mlx5_vport_info
{
146 /* Vport context events */
147 enum mlx5_eswitch_vport_event
{
148 MLX5_VPORT_UC_ADDR_CHANGE
= BIT(0),
149 MLX5_VPORT_MC_ADDR_CHANGE
= BIT(1),
150 MLX5_VPORT_PROMISC_CHANGE
= BIT(3),
153 struct mlx5_esw_bridge
;
156 struct mlx5_core_dev
*dev
;
157 struct hlist_head uc_list
[MLX5_L2_ADDR_HASH_SIZE
];
158 struct hlist_head mc_list
[MLX5_L2_ADDR_HASH_SIZE
];
159 struct mlx5_flow_handle
*promisc_rule
;
160 struct mlx5_flow_handle
*allmulti_rule
;
161 struct work_struct vport_change_handler
;
163 struct vport_ingress ingress
;
164 struct vport_egress egress
;
165 u32 default_metadata
;
168 struct mlx5_vport_info info
;
180 enum mlx5_eswitch_vport_event enabled_events
;
182 struct devlink_port
*dl_port
;
183 struct mlx5_esw_bridge
*bridge
;
186 struct mlx5_esw_indir_table
;
188 struct mlx5_eswitch_fdb
{
191 struct mlx5_flow_table
*fdb
;
192 struct mlx5_flow_group
*addr_grp
;
193 struct mlx5_flow_group
*allmulti_grp
;
194 struct mlx5_flow_group
*promisc_grp
;
195 struct mlx5_flow_table
*vepa_fdb
;
196 struct mlx5_flow_handle
*vepa_uplink_rule
;
197 struct mlx5_flow_handle
*vepa_star_rule
;
200 struct offloads_fdb
{
201 struct mlx5_flow_namespace
*ns
;
202 struct mlx5_flow_table
*tc_miss_table
;
203 struct mlx5_flow_table
*slow_fdb
;
204 struct mlx5_flow_group
*send_to_vport_grp
;
205 struct mlx5_flow_group
*send_to_vport_meta_grp
;
206 struct mlx5_flow_group
*peer_miss_grp
;
207 struct mlx5_flow_handle
**peer_miss_rules
;
208 struct mlx5_flow_group
*miss_grp
;
209 struct mlx5_flow_handle
**send_to_vport_meta_rules
;
210 struct mlx5_flow_handle
*miss_rule_uni
;
211 struct mlx5_flow_handle
*miss_rule_multi
;
212 int vlan_push_pop_refcount
;
214 struct mlx5_fs_chains
*esw_chains_priv
;
216 DECLARE_HASHTABLE(table
, 8);
217 /* Protects vports.table */
221 struct mlx5_esw_indir_table
*indir
;
228 struct mlx5_esw_offload
{
229 struct mlx5_flow_table
*ft_offloads_restore
;
230 struct mlx5_flow_group
*restore_group
;
231 struct mlx5_modify_hdr
*restore_copy_hdr_id
;
232 struct mapping_ctx
*reg_c0_obj_pool
;
234 struct mlx5_flow_table
*ft_offloads
;
235 struct mlx5_flow_group
*vport_rx_group
;
236 struct xarray vport_reps
;
237 struct list_head peer_flows
;
238 struct mutex peer_mutex
;
239 struct mutex encap_tbl_lock
; /* protects encap_tbl */
240 DECLARE_HASHTABLE(encap_tbl
, 8);
241 struct mutex decap_tbl_lock
; /* protects decap_tbl */
242 DECLARE_HASHTABLE(decap_tbl
, 8);
243 struct mod_hdr_tbl mod_hdr
;
244 DECLARE_HASHTABLE(termtbl_tbl
, 8);
245 struct mutex termtbl_mutex
; /* protects termtbl hash */
246 struct xarray vhca_map
;
247 const struct mlx5_eswitch_rep_ops
*rep_ops
[NUM_REP_TYPES
];
249 atomic64_t num_flows
;
250 enum devlink_eswitch_encap_mode encap
;
251 struct ida vport_metadata_ida
;
252 unsigned int host_number
; /* ECPF supports one external host */
255 /* E-Switch MC FDB table hash node */
256 struct esw_mc_addr
{ /* SRIOV only */
257 struct l2addr_node node
;
258 struct mlx5_flow_handle
*uplink_rule
; /* Forward to uplink rule */
262 struct mlx5_host_work
{
263 struct work_struct work
;
264 struct mlx5_eswitch
*esw
;
267 struct mlx5_esw_functions
{
273 MLX5_ESWITCH_VPORT_MATCH_METADATA
= BIT(0),
274 MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED
= BIT(1),
277 struct mlx5_esw_bridge_offloads
;
279 struct mlx5_eswitch
{
280 struct mlx5_core_dev
*dev
;
282 struct mlx5_eswitch_fdb fdb_table
;
283 /* legacy data structures */
284 struct hlist_head mc_table
[MLX5_L2_ADDR_HASH_SIZE
];
285 struct esw_mc_addr mc_promisc
;
287 struct workqueue_struct
*work_queue
;
288 struct xarray vports
;
292 /* Synchronize between vport change events
293 * and async SRIOV admin state changes
295 struct mutex state_lock
;
297 /* Protects eswitch mode change that occurs via one or more
298 * user commands, i.e. sriov state change, devlink commands.
300 struct rw_semaphore mode_lock
;
301 atomic64_t user_count
;
308 struct mlx5_esw_bridge_offloads
*br_offloads
;
309 struct mlx5_esw_offload offloads
;
312 u16 first_host_vport
;
313 struct mlx5_esw_functions esw_funcs
;
317 struct blocking_notifier_head n_head
;
320 void esw_offloads_disable(struct mlx5_eswitch
*esw
);
321 int esw_offloads_enable(struct mlx5_eswitch
*esw
);
322 void esw_offloads_cleanup_reps(struct mlx5_eswitch
*esw
);
323 int esw_offloads_init_reps(struct mlx5_eswitch
*esw
);
325 bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch
*esw
);
326 int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch
*esw
, bool enable
);
327 u32
mlx5_esw_match_metadata_alloc(struct mlx5_eswitch
*esw
);
328 void mlx5_esw_match_metadata_free(struct mlx5_eswitch
*esw
, u32 metadata
);
330 int mlx5_esw_modify_vport_rate(struct mlx5_eswitch
*esw
, u16 vport_num
,
334 int mlx5_eswitch_init(struct mlx5_core_dev
*dev
);
335 void mlx5_eswitch_cleanup(struct mlx5_eswitch
*esw
);
/* Sentinel num_vfs value telling the enable path to ignore/keep the
 * VF count (see mlx5_eswitch_enable_locked()).
 */
#define MLX5_ESWITCH_IGNORE_NUM_VFS (-1)
338 int mlx5_eswitch_enable_locked(struct mlx5_eswitch
*esw
, int mode
, int num_vfs
);
339 int mlx5_eswitch_enable(struct mlx5_eswitch
*esw
, int num_vfs
);
340 void mlx5_eswitch_disable_locked(struct mlx5_eswitch
*esw
, bool clear_vf
);
341 void mlx5_eswitch_disable(struct mlx5_eswitch
*esw
, bool clear_vf
);
342 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch
*esw
,
343 u16 vport
, const u8
*mac
);
344 int mlx5_eswitch_set_vport_state(struct mlx5_eswitch
*esw
,
345 u16 vport
, int link_state
);
346 int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch
*esw
,
347 u16 vport
, u16 vlan
, u8 qos
);
348 int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch
*esw
,
349 u16 vport
, bool spoofchk
);
350 int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch
*esw
,
351 u16 vport_num
, bool setting
);
352 int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch
*esw
, u16 vport
,
353 u32 max_rate
, u32 min_rate
);
354 int mlx5_eswitch_set_vepa(struct mlx5_eswitch
*esw
, u8 setting
);
355 int mlx5_eswitch_get_vepa(struct mlx5_eswitch
*esw
, u8
*setting
);
356 int mlx5_eswitch_get_vport_config(struct mlx5_eswitch
*esw
,
357 u16 vport
, struct ifla_vf_info
*ivi
);
358 int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch
*esw
,
360 struct ifla_vf_stats
*vf_stats
);
361 void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle
*rule
);
363 int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev
*dev
, u16 vport
,
364 bool other_vport
, void *in
);
366 struct mlx5_flow_spec
;
367 struct mlx5_esw_flow_attr
;
368 struct mlx5_termtbl_handle
;
371 mlx5_eswitch_termtbl_required(struct mlx5_eswitch
*esw
,
372 struct mlx5_flow_attr
*attr
,
373 struct mlx5_flow_act
*flow_act
,
374 struct mlx5_flow_spec
*spec
);
376 struct mlx5_flow_handle
*
377 mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch
*esw
,
378 struct mlx5_flow_table
*ft
,
379 struct mlx5_flow_spec
*spec
,
380 struct mlx5_esw_flow_attr
*attr
,
381 struct mlx5_flow_act
*flow_act
,
382 struct mlx5_flow_destination
*dest
,
386 mlx5_eswitch_termtbl_put(struct mlx5_eswitch
*esw
,
387 struct mlx5_termtbl_handle
*tt
);
390 mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch
*esw
, struct mlx5_flow_spec
*spec
);
392 struct mlx5_flow_handle
*
393 mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch
*esw
,
394 struct mlx5_flow_spec
*spec
,
395 struct mlx5_flow_attr
*attr
);
396 struct mlx5_flow_handle
*
397 mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch
*esw
,
398 struct mlx5_flow_spec
*spec
,
399 struct mlx5_flow_attr
*attr
);
401 mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch
*esw
,
402 struct mlx5_flow_handle
*rule
,
403 struct mlx5_flow_attr
*attr
);
405 mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch
*esw
,
406 struct mlx5_flow_handle
*rule
,
407 struct mlx5_flow_attr
*attr
);
409 struct mlx5_flow_handle
*
410 mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch
*esw
, u16 vport
,
411 struct mlx5_flow_destination
*dest
);
414 SET_VLAN_STRIP
= BIT(0),
415 SET_VLAN_INSERT
= BIT(1)
418 enum mlx5_flow_match_level
{
419 MLX5_MATCH_NONE
= MLX5_INLINE_MODE_NONE
,
420 MLX5_MATCH_L2
= MLX5_INLINE_MODE_L2
,
421 MLX5_MATCH_L3
= MLX5_INLINE_MODE_IP
,
422 MLX5_MATCH_L4
= MLX5_INLINE_MODE_TCP_UDP
,
/* Current maximum number of vports a single flow rule can forward to
 * (flow-based vport multicasting).
 */
#define MLX5_MAX_FLOW_FWD_VPORTS 2
429 MLX5_ESW_DEST_ENCAP
= BIT(0),
430 MLX5_ESW_DEST_ENCAP_VALID
= BIT(1),
431 MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE
= BIT(2),
435 MLX5_ESW_ATTR_FLAG_VLAN_HANDLED
= BIT(0),
436 MLX5_ESW_ATTR_FLAG_SLOW_PATH
= BIT(1),
437 MLX5_ESW_ATTR_FLAG_NO_IN_PORT
= BIT(2),
438 MLX5_ESW_ATTR_FLAG_SRC_REWRITE
= BIT(3),
439 MLX5_ESW_ATTR_FLAG_SAMPLE
= BIT(4),
442 struct mlx5_esw_flow_attr
{
443 struct mlx5_eswitch_rep
*in_rep
;
444 struct mlx5_core_dev
*in_mdev
;
445 struct mlx5_core_dev
*counter_dev
;
450 __be16 vlan_proto
[MLX5_FS_VLAN_DEPTH
];
451 u16 vlan_vid
[MLX5_FS_VLAN_DEPTH
];
452 u8 vlan_prio
[MLX5_FS_VLAN_DEPTH
];
456 struct mlx5_eswitch_rep
*rep
;
457 struct mlx5_pkt_reformat
*pkt_reformat
;
458 struct mlx5_core_dev
*mdev
;
459 struct mlx5_termtbl_handle
*termtbl
;
460 int src_port_rewrite_act_id
;
461 } dests
[MLX5_MAX_FLOW_FWD_VPORTS
];
462 struct mlx5_rx_tun_attr
*rx_tun_attr
;
463 struct mlx5_pkt_reformat
*decap_pkt_reformat
;
464 struct mlx5_sample_attr
*sample
;
467 int mlx5_devlink_eswitch_mode_set(struct devlink
*devlink
, u16 mode
,
468 struct netlink_ext_ack
*extack
);
469 int mlx5_devlink_eswitch_mode_get(struct devlink
*devlink
, u16
*mode
);
470 int mlx5_devlink_eswitch_inline_mode_set(struct devlink
*devlink
, u8 mode
,
471 struct netlink_ext_ack
*extack
);
472 int mlx5_devlink_eswitch_inline_mode_get(struct devlink
*devlink
, u8
*mode
);
473 int mlx5_devlink_eswitch_encap_mode_set(struct devlink
*devlink
,
474 enum devlink_eswitch_encap_mode encap
,
475 struct netlink_ext_ack
*extack
);
476 int mlx5_devlink_eswitch_encap_mode_get(struct devlink
*devlink
,
477 enum devlink_eswitch_encap_mode
*encap
);
478 int mlx5_devlink_port_function_hw_addr_get(struct devlink
*devlink
,
479 struct devlink_port
*port
,
480 u8
*hw_addr
, int *hw_addr_len
,
481 struct netlink_ext_ack
*extack
);
482 int mlx5_devlink_port_function_hw_addr_set(struct devlink
*devlink
,
483 struct devlink_port
*port
,
484 const u8
*hw_addr
, int hw_addr_len
,
485 struct netlink_ext_ack
*extack
);
487 void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch
*esw
, u8 rep_type
);
489 int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch
*esw
,
490 struct mlx5_flow_attr
*attr
);
491 int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch
*esw
,
492 struct mlx5_flow_attr
*attr
);
493 int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch
*esw
,
494 u16 vport
, u16 vlan
, u8 qos
, u8 set_flags
);
/* Report whether the eswitch QoS infrastructure has been enabled. */
static inline bool mlx5_esw_qos_enabled(struct mlx5_eswitch *esw)
{
	return esw->qos.enabled;
}
501 static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev
*dev
,
504 bool ret
= MLX5_CAP_ESW_FLOWTABLE_FDB(dev
, pop_vlan
) &&
505 MLX5_CAP_ESW_FLOWTABLE_FDB(dev
, push_vlan
);
510 return ret
&& MLX5_CAP_ESW_FLOWTABLE_FDB(dev
, pop_vlan_2
) &&
511 MLX5_CAP_ESW_FLOWTABLE_FDB(dev
, push_vlan_2
);
514 bool mlx5_esw_lag_prereq(struct mlx5_core_dev
*dev0
,
515 struct mlx5_core_dev
*dev1
);
516 bool mlx5_esw_multipath_prereq(struct mlx5_core_dev
*dev0
,
517 struct mlx5_core_dev
*dev1
);
519 const u32
*mlx5_esw_query_functions(struct mlx5_core_dev
*dev
);
/* Debug-mask bit that gates esw_debug() output. */
#define MLX5_DEBUG_ESWITCH_MASK BIT(3)

/* Info-level device log, prefixed with "E-Switch: ". */
#define esw_info(__dev, format, ...) \
	dev_info((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

/* Warning-level device log, prefixed with "E-Switch: ". */
#define esw_warn(__dev, format, ...) \
	dev_warn((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

/* Debug log, emitted only when MLX5_DEBUG_ESWITCH_MASK is set in the
 * core debug mask.
 */
#define esw_debug(dev, format, ...) \
	mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)
/* An eswitch may be used only when it exists and its device is the
 * eswitch manager function.
 */
static inline bool mlx5_esw_allowed(const struct mlx5_eswitch *esw)
{
	return esw && MLX5_ESWITCH_MANAGER(esw->dev);
}
537 /* The returned number is valid only when the dev is eswitch manager. */
538 static inline u16
mlx5_eswitch_manager_vport(struct mlx5_core_dev
*dev
)
540 return mlx5_core_is_ecpf_esw_manager(dev
) ?
541 MLX5_VPORT_ECPF
: MLX5_VPORT_PF
;
/* True when @vport_num is this eswitch's manager vport. */
static inline bool
mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
	return esw->manager_vport == vport_num;
}
/* First host-function vport number: the PF when an ECPF manages the
 * eswitch, otherwise the first VF vport.
 */
static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev) ?
		MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF;
}
/* True when this device handles eswitch functions-changed events,
 * i.e. it is an ECPF eswitch manager.
 */
static inline bool mlx5_eswitch_is_funcs_handler(const struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev);
}
/* Build a devlink port index from the device and vport: high 16 bits
 * carry the vhca_id capability, low 16 bits the vport number.
 * NOTE(review): second parameter reconstructed as u16 vport_num from
 * the body and the file-wide vport_num convention — confirm upstream.
 */
static inline unsigned int
mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
				     u16 vport_num)
{
	return (MLX5_CAP_GEN(dev, vhca_id) << 16) | vport_num;
}
/* Extract the vport number (low 16 bits) from a devlink port index;
 * inverse of mlx5_esw_vport_to_devlink_port_index().
 */
static inline u16
mlx5_esw_devlink_port_index_to_vport_num(unsigned int dl_port_index)
{
	return dl_port_index & 0xffff;
}
574 /* TODO: This mlx5e_tc function shouldn't be called by eswitch */
575 void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch
*esw
);
/* Each mark identifies eswitch vport type.
 * MLX5_ESW_VPT_HOST_FN is used to identify both PF and VF ports using
 * the host function mark.
 * MLX5_ESW_VPT_VF identifies a SRIOV VF vport.
 * MLX5_ESW_VPT_SF identifies SF vport.
 */
#define MLX5_ESW_VPT_HOST_FN XA_MARK_0
#define MLX5_ESW_VPT_VF XA_MARK_1
#define MLX5_ESW_VPT_SF XA_MARK_2

/* The vport iterator is valid only after vport are initialized in mlx5_eswitch_init.
 * Borrowed the idea from xa_for_each_marked() but with support for desired last element.
 */
#define mlx5_esw_for_each_vport(esw, index, vport) \
	xa_for_each(&((esw)->vports), index, vport)

/* Iterate marked xarray entries from index 0 up to and including @last. */
#define mlx5_esw_for_each_entry_marked(xa, index, entry, last, filter) \
	for (index = 0, entry = xa_find(xa, &index, last, filter); \
	     entry; entry = xa_find_after(xa, &index, last, filter))

/* Iterate eswitch vports carrying the @filter mark, up to index @last. */
#define mlx5_esw_for_each_vport_marked(esw, index, vport, last, filter) \
	mlx5_esw_for_each_entry_marked(&((esw)->vports), index, vport, last, filter)

/* Iterate SRIOV VF vports, up to index @last. */
#define mlx5_esw_for_each_vf_vport(esw, index, vport, last) \
	mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_VF)

/* Iterate host-function (PF/VF) vports, up to index @last. */
#define mlx5_esw_for_each_host_func_vport(esw, index, vport, last) \
	mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_HOST_FN)
607 struct mlx5_eswitch
*mlx5_devlink_eswitch_get(struct devlink
*devlink
);
608 struct mlx5_vport
*__must_check
609 mlx5_eswitch_get_vport(struct mlx5_eswitch
*esw
, u16 vport_num
);
611 bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch
*esw
, u16 vport_num
);
612 bool mlx5_esw_is_sf_vport(struct mlx5_eswitch
*esw
, u16 vport_num
);
614 int mlx5_esw_funcs_changed_handler(struct notifier_block
*nb
, unsigned long type
, void *data
);
617 mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch
*esw
,
618 enum mlx5_eswitch_vport_event enabled_events
);
619 void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch
*esw
);
621 int mlx5_esw_vport_enable(struct mlx5_eswitch
*esw
, u16 vport_num
,
622 enum mlx5_eswitch_vport_event enabled_events
);
623 void mlx5_esw_vport_disable(struct mlx5_eswitch
*esw
, u16 vport_num
);
626 esw_vport_create_offloads_acl_tables(struct mlx5_eswitch
*esw
,
627 struct mlx5_vport
*vport
);
629 esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch
*esw
,
630 struct mlx5_vport
*vport
);
632 struct esw_vport_tbl_namespace
{
638 struct mlx5_vport_tbl_attr
{
642 const struct esw_vport_tbl_namespace
*vport_ns
;
645 struct mlx5_flow_table
*
646 mlx5_esw_vporttbl_get(struct mlx5_eswitch
*esw
, struct mlx5_vport_tbl_attr
*attr
);
648 mlx5_esw_vporttbl_put(struct mlx5_eswitch
*esw
, struct mlx5_vport_tbl_attr
*attr
);
650 struct mlx5_flow_handle
*
651 esw_add_restore_rule(struct mlx5_eswitch
*esw
, u32 tag
);
653 int esw_offloads_load_rep(struct mlx5_eswitch
*esw
, u16 vport_num
);
654 void esw_offloads_unload_rep(struct mlx5_eswitch
*esw
, u16 vport_num
);
656 int mlx5_esw_offloads_rep_load(struct mlx5_eswitch
*esw
, u16 vport_num
);
657 void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch
*esw
, u16 vport_num
);
659 int mlx5_eswitch_load_vport(struct mlx5_eswitch
*esw
, u16 vport_num
,
660 enum mlx5_eswitch_vport_event enabled_events
);
661 void mlx5_eswitch_unload_vport(struct mlx5_eswitch
*esw
, u16 vport_num
);
663 int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch
*esw
, u16 num_vfs
,
664 enum mlx5_eswitch_vport_event enabled_events
);
665 void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch
*esw
, u16 num_vfs
);
667 int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch
*esw
, u16 vport_num
);
668 void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch
*esw
, u16 vport_num
);
669 struct devlink_port
*mlx5_esw_offloads_devlink_port(struct mlx5_eswitch
*esw
, u16 vport_num
);
671 int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch
*esw
, struct devlink_port
*dl_port
,
672 u16 vport_num
, u32 controller
, u32 sfnum
);
673 void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch
*esw
, u16 vport_num
);
675 int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch
*esw
, struct devlink_port
*dl_port
,
676 u16 vport_num
, u32 controller
, u32 sfnum
);
677 void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch
*esw
, u16 vport_num
);
678 int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev
*dev
, u16
*max_sfs
, u16
*sf_base_id
);
680 int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch
*esw
, u16 vport_num
);
681 void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch
*esw
, u16 vport_num
);
682 int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch
*esw
, u16 vhca_id
, u16
*vport_num
);
685 * mlx5_esw_event_info - Indicates eswitch mode changed/changing.
687 * @new_mode: New mode of eswitch.
689 struct mlx5_esw_event_info
{
693 int mlx5_esw_event_notifier_register(struct mlx5_eswitch
*esw
, struct notifier_block
*n
);
694 void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch
*esw
, struct notifier_block
*n
);
696 bool mlx5_esw_hold(struct mlx5_core_dev
*dev
);
697 void mlx5_esw_release(struct mlx5_core_dev
*dev
);
698 void mlx5_esw_get(struct mlx5_core_dev
*dev
);
699 void mlx5_esw_put(struct mlx5_core_dev
*dev
);
700 int mlx5_esw_try_lock(struct mlx5_eswitch
*esw
);
701 void mlx5_esw_unlock(struct mlx5_eswitch
*esw
);
703 void esw_vport_change_handle_locked(struct mlx5_vport
*vport
);
705 bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch
*esw
, u32 controller
);
707 #else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs: no-op implementations used when CONFIG_MLX5_ESWITCH
 * is disabled, so callers need no #ifdef guards.
 */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
	/* Querying functions is meaningless without an eswitch. */
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	/* Restore rules require offloads support. */
	return ERR_PTR(-EOPNOTSUPP);
}
728 static inline unsigned int
729 mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev
*dev
,
734 #endif /* CONFIG_MLX5_ESWITCH */
736 #endif /* __MLX5_ESWITCH_H__ */