/*
 * Copyright (c) 2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __MLX5_ESWITCH_H__
#define __MLX5_ESWITCH_H__

#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <net/devlink.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "lib/mpfs.h"
#include "lib/fs_chains.h"
#include "sf/sf.h"
#include "en/tc_ct.h"

#ifdef CONFIG_MLX5_ESWITCH

#define ESW_OFFLOADS_DEFAULT_NUM_GROUPS 15

#define MLX5_MAX_UC_PER_VPORT(dev) \
        (1 << MLX5_CAP_GEN(dev, log_max_current_uc_list))

#define MLX5_MAX_MC_PER_VPORT(dev) \
        (1 << MLX5_CAP_GEN(dev, log_max_current_mc_list))

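/* Illustrative arithmetic (hypothetical capability value): if
 * log_max_current_uc_list is 7, MLX5_MAX_UC_PER_VPORT(dev) evaluates to
 * 1 << 7 = 128 unicast L2 addresses per vport.
 */
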
#define MLX5_MIN_BW_SHARE 1

#define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \
        min_t(u32, max_t(u32, (rate) / (divider), MLX5_MIN_BW_SHARE), limit)

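/* Illustrative arithmetic (hypothetical values): with rate = 5000,
 * divider = 100 and limit = 100,
 * MLX5_RATE_TO_BW_SHARE(5000, 100, 100) = min(max(5000 / 100, 1), 100) = 50.
 * A rate below the divider is clamped up to MLX5_MIN_BW_SHARE:
 * MLX5_RATE_TO_BW_SHARE(20, 100, 100) = min(max(0, 1), 100) = 1.
 */
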
#define mlx5_esw_has_fwd_fdb(dev) \
        MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table)

#define esw_chains(esw) \
        ((esw)->fdb_table.offloads.esw_chains_priv)

struct vport_ingress {
        struct mlx5_flow_table *acl;
        struct mlx5_flow_handle *allow_rule;
        struct {
                struct mlx5_flow_group *allow_spoofchk_only_grp;
                struct mlx5_flow_group *allow_untagged_spoofchk_grp;
                struct mlx5_flow_group *allow_untagged_only_grp;
                struct mlx5_flow_group *drop_grp;
                struct mlx5_flow_handle *drop_rule;
                struct mlx5_fc *drop_counter;
        } legacy;
        struct {
                /* Optional group to add an FTE to do internal priority
                 * tagging on ingress packets.
                 */
                struct mlx5_flow_group *metadata_prio_tag_grp;
                /* Group to add default match-all FTE entry to tag ingress
                 * packet with metadata.
                 */
                struct mlx5_flow_group *metadata_allmatch_grp;
                struct mlx5_modify_hdr *modify_metadata;
                struct mlx5_flow_handle *modify_metadata_rule;
        } offloads;
};

struct vport_egress {
        struct mlx5_flow_table *acl;
        struct mlx5_flow_handle *allowed_vlan;
        struct mlx5_flow_group *vlan_grp;
        union {
                struct {
                        struct mlx5_flow_group *drop_grp;
                        struct mlx5_flow_handle *drop_rule;
                        struct mlx5_fc *drop_counter;
                } legacy;
                struct {
                        struct mlx5_flow_group *fwd_grp;
                        struct mlx5_flow_handle *fwd_rule;
                } offloads;
        };
};

struct mlx5_vport_drop_stats {
        u64 rx_dropped;
        u64 tx_dropped;
};

struct mlx5_vport_info {
        u8 mac[ETH_ALEN];
        u16 vlan;
        u8 qos;
        u64 node_guid;
        int link_state;
        u32 min_rate;
        u32 max_rate;
        bool spoofchk;
        bool trusted;
};

/* Vport context events */
enum mlx5_eswitch_vport_event {
        MLX5_VPORT_UC_ADDR_CHANGE = BIT(0),
        MLX5_VPORT_MC_ADDR_CHANGE = BIT(1),
        MLX5_VPORT_PROMISC_CHANGE = BIT(3),
};

struct mlx5_vport {
        struct mlx5_core_dev *dev;
        int vport;
        struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE];
        struct hlist_head mc_list[MLX5_L2_ADDR_HASH_SIZE];
        struct mlx5_flow_handle *promisc_rule;
        struct mlx5_flow_handle *allmulti_rule;
        struct work_struct vport_change_handler;

        struct vport_ingress ingress;
        struct vport_egress egress;
        u32 default_metadata;
        u32 metadata;

        struct mlx5_vport_info info;

        struct {
                bool enabled;
                u32 esw_tsar_ix;
                u32 bw_share;
        } qos;

        bool enabled;
        enum mlx5_eswitch_vport_event enabled_events;
        struct devlink_port *dl_port;
};

struct mlx5_eswitch_fdb {
        union {
                struct legacy_fdb {
                        struct mlx5_flow_table *fdb;
                        struct mlx5_flow_group *addr_grp;
                        struct mlx5_flow_group *allmulti_grp;
                        struct mlx5_flow_group *promisc_grp;
                        struct mlx5_flow_table *vepa_fdb;
                        struct mlx5_flow_handle *vepa_uplink_rule;
                        struct mlx5_flow_handle *vepa_star_rule;
                } legacy;

                struct offloads_fdb {
                        struct mlx5_flow_namespace *ns;
                        struct mlx5_flow_table *slow_fdb;
                        struct mlx5_flow_group *send_to_vport_grp;
                        struct mlx5_flow_group *peer_miss_grp;
                        struct mlx5_flow_handle **peer_miss_rules;
                        struct mlx5_flow_group *miss_grp;
                        struct mlx5_flow_handle *miss_rule_uni;
                        struct mlx5_flow_handle *miss_rule_multi;
                        int vlan_push_pop_refcount;

                        struct mlx5_fs_chains *esw_chains_priv;
                        struct {
                                DECLARE_HASHTABLE(table, 8);
                                /* Protects vports.table */
                                struct mutex lock;
                        } vports;

                } offloads;
        };
        u32 flags;
};

struct mlx5_esw_offload {
        struct mlx5_flow_table *ft_offloads_restore;
        struct mlx5_flow_group *restore_group;
        struct mlx5_modify_hdr *restore_copy_hdr_id;

        struct mlx5_flow_table *ft_offloads;
        struct mlx5_flow_group *vport_rx_group;
        struct mlx5_eswitch_rep *vport_reps;
        struct list_head peer_flows;
        struct mutex peer_mutex;
        struct mutex encap_tbl_lock; /* protects encap_tbl */
        DECLARE_HASHTABLE(encap_tbl, 8);
        struct mutex decap_tbl_lock; /* protects decap_tbl */
        DECLARE_HASHTABLE(decap_tbl, 8);
        struct mod_hdr_tbl mod_hdr;
        DECLARE_HASHTABLE(termtbl_tbl, 8);
        struct mutex termtbl_mutex; /* protects termtbl hash */
        const struct mlx5_eswitch_rep_ops *rep_ops[NUM_REP_TYPES];
        u8 inline_mode;
        atomic64_t num_flows;
        enum devlink_eswitch_encap_mode encap;
        struct ida vport_metadata_ida;
        unsigned int host_number; /* ECPF supports one external host */
};

/* E-Switch MC FDB table hash node */
struct esw_mc_addr { /* SRIOV only */
        struct l2addr_node node;
        struct mlx5_flow_handle *uplink_rule; /* Forward to uplink rule */
        u32 refcnt;
};

struct mlx5_host_work {
        struct work_struct work;
        struct mlx5_eswitch *esw;
};

struct mlx5_esw_functions {
        struct mlx5_nb nb;
        u16 num_vfs;
};

enum {
        MLX5_ESWITCH_VPORT_MATCH_METADATA = BIT(0),
        MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED = BIT(1),
};

struct mlx5_eswitch {
        struct mlx5_core_dev *dev;
        struct mlx5_nb nb;
        struct mlx5_eswitch_fdb fdb_table;
        /* legacy data structures */
        struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE];
        struct esw_mc_addr mc_promisc;
        /* end of legacy */
        struct workqueue_struct *work_queue;
        struct mlx5_vport *vports;
        u32 flags;
        int total_vports;
        int enabled_vports;
        /* Synchronize between vport change events
         * and async SRIOV admin state changes
         */
        struct mutex state_lock;

        /* Protects eswitch mode change that occurs via one or more
         * user commands, i.e. sriov state change, devlink commands.
         */
        struct mutex mode_lock;

        struct {
                bool enabled;
                u32 root_tsar_id;
        } qos;

        struct mlx5_esw_offload offloads;
        int mode;
        u16 manager_vport;
        u16 first_host_vport;
        struct mlx5_esw_functions esw_funcs;
        struct {
                u32 large_group_num;
        } params;
};

void esw_offloads_disable(struct mlx5_eswitch *esw);
int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);

u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw);
void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata);

int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
                               u32 rate_mbps);

/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);

#define MLX5_ESWITCH_IGNORE_NUM_VFS (-1)
int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs);
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf);
void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf);

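/* Illustrative sketch, not a call site in this header: a caller that only
 * switches the eswitch mode can pass MLX5_ESWITCH_IGNORE_NUM_VFS so the
 * currently known VF count is left unchanged, e.g.:
 *
 *        err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
 *                                         MLX5_ESWITCH_IGNORE_NUM_VFS);
 */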
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
                               u16 vport, const u8 *mac);
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
                                 u16 vport, int link_state);
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
                                u16 vport, u16 vlan, u8 qos);
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
                                    u16 vport, bool spoofchk);
int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
                                 u16 vport_num, bool setting);
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
                                u32 max_rate, u32 min_rate);
int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting);
int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting);
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
                                  u16 vport, struct ifla_vf_info *ivi);
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
                                 u16 vport,
                                 struct ifla_vf_stats *vf_stats);
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);

int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
                                          bool other_vport, void *in);

struct mlx5_flow_spec;
struct mlx5_esw_flow_attr;
struct mlx5_termtbl_handle;

bool
mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
                              struct mlx5_flow_attr *attr,
                              struct mlx5_flow_act *flow_act,
                              struct mlx5_flow_spec *spec);

struct mlx5_flow_handle *
mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
                              struct mlx5_flow_table *ft,
                              struct mlx5_flow_spec *spec,
                              struct mlx5_esw_flow_attr *attr,
                              struct mlx5_flow_act *flow_act,
                              struct mlx5_flow_destination *dest,
                              int num_dest);

void
mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
                         struct mlx5_termtbl_handle *tt);

struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
                                struct mlx5_flow_spec *spec,
                                struct mlx5_flow_attr *attr);
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
                          struct mlx5_flow_spec *spec,
                          struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
                                struct mlx5_flow_handle *rule,
                                struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
                          struct mlx5_flow_handle *rule,
                          struct mlx5_flow_attr *attr);

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
                                  struct mlx5_flow_destination *dest);

enum {
        SET_VLAN_STRIP  = BIT(0),
        SET_VLAN_INSERT = BIT(1)
};

enum mlx5_flow_match_level {
        MLX5_MATCH_NONE = MLX5_INLINE_MODE_NONE,
        MLX5_MATCH_L2   = MLX5_INLINE_MODE_L2,
        MLX5_MATCH_L3   = MLX5_INLINE_MODE_IP,
        MLX5_MATCH_L4   = MLX5_INLINE_MODE_TCP_UDP,
};

/* current maximum for flow based vport multicasting */
#define MLX5_MAX_FLOW_FWD_VPORTS 2

enum {
        MLX5_ESW_DEST_ENCAP       = BIT(0),
        MLX5_ESW_DEST_ENCAP_VALID = BIT(1),
};

enum {
        MLX5_ESW_ATTR_FLAG_VLAN_HANDLED = BIT(0),
        MLX5_ESW_ATTR_FLAG_SLOW_PATH    = BIT(1),
        MLX5_ESW_ATTR_FLAG_NO_IN_PORT   = BIT(2),
};

struct mlx5_esw_flow_attr {
        struct mlx5_eswitch_rep *in_rep;
        struct mlx5_core_dev *in_mdev;
        struct mlx5_core_dev *counter_dev;

        int split_count;
        int out_count;

        __be16 vlan_proto[MLX5_FS_VLAN_DEPTH];
        u16 vlan_vid[MLX5_FS_VLAN_DEPTH];
        u8 vlan_prio[MLX5_FS_VLAN_DEPTH];
        u8 total_vlan;
        struct {
                u32 flags;
                struct mlx5_eswitch_rep *rep;
                struct mlx5_pkt_reformat *pkt_reformat;
                struct mlx5_core_dev *mdev;
                struct mlx5_termtbl_handle *termtbl;
        } dests[MLX5_MAX_FLOW_FWD_VPORTS];
        struct mlx5_pkt_reformat *decap_pkt_reformat;
};

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
                                  struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
                                         struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
                                        enum devlink_eswitch_encap_mode encap,
                                        struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
                                        enum devlink_eswitch_encap_mode *encap);
int mlx5_devlink_port_function_hw_addr_get(struct devlink *devlink,
                                           struct devlink_port *port,
                                           u8 *hw_addr, int *hw_addr_len,
                                           struct netlink_ext_ack *extack);
int mlx5_devlink_port_function_hw_addr_set(struct devlink *devlink,
                                           struct devlink_port *port,
                                           const u8 *hw_addr, int hw_addr_len,
                                           struct netlink_ext_ack *extack);

void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);

int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
                                 struct mlx5_flow_attr *attr);
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
                                 struct mlx5_flow_attr *attr);
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
                                  u16 vport, u16 vlan, u8 qos, u8 set_flags);

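/* Illustrative sketch: set_flags is a bitmask of the SET_VLAN_* values
 * declared above, e.g. a VST-style caller that both strips and inserts:
 *
 *        err = __mlx5_eswitch_set_vport_vlan(esw, vport, vid, qos,
 *                                            SET_VLAN_STRIP | SET_VLAN_INSERT);
 */
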
static inline bool mlx5_esw_qos_enabled(struct mlx5_eswitch *esw)
{
        return esw->qos.enabled;
}

static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
                                                       u8 vlan_depth)
{
        bool ret = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) &&
                   MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan);

        if (vlan_depth == 1)
                return ret;

        return ret && MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan_2) &&
               MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan_2);
}

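/* Illustrative sketch: a caller that needs to push/pop two VLAN headers
 * (QinQ) would gate on a depth of 2, e.g.:
 *
 *        if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 2))
 *                return -EOPNOTSUPP;
 */
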
bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0,
                         struct mlx5_core_dev *dev1);
bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
                               struct mlx5_core_dev *dev1);

const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev);

#define MLX5_DEBUG_ESWITCH_MASK BIT(3)

#define esw_info(__dev, format, ...) \
        dev_info((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_warn(__dev, format, ...) \
        dev_warn((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_debug(dev, format, ...) \
        mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)

/* The returned number is valid only when the dev is an eswitch manager. */
static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev)
{
        return mlx5_core_is_ecpf_esw_manager(dev) ?
                MLX5_VPORT_ECPF : MLX5_VPORT_PF;
}

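/* Illustrative sketch: on an ECPF-managed device (e.g. a SmartNIC) the
 * manager vport is MLX5_VPORT_ECPF, otherwise it is MLX5_VPORT_PF; a
 * caller would typically cache it once, e.g.:
 *
 *        esw->manager_vport = mlx5_eswitch_manager_vport(esw->dev);
 */
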
static inline bool
mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
        return esw->manager_vport == vport_num;
}

static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
{
        return mlx5_core_is_ecpf_esw_manager(dev) ?
                MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF;
}

static inline int mlx5_esw_sf_start_idx(const struct mlx5_eswitch *esw)
{
        /* The PF and VF vports occupy indices 0..max_vfs, so the first SF
         * index comes right after them.
         */
        return MLX5_VPORT_PF_PLACEHOLDER + mlx5_core_max_vfs(esw->dev);
}

static inline int mlx5_esw_sf_end_idx(const struct mlx5_eswitch *esw)
{
        return mlx5_esw_sf_start_idx(esw) + mlx5_sf_max_functions(esw->dev);
}

static inline int
mlx5_esw_sf_vport_num_to_index(const struct mlx5_eswitch *esw, u16 vport_num)
{
        return vport_num - mlx5_sf_start_function_id(esw->dev) +
               MLX5_VPORT_PF_PLACEHOLDER + mlx5_core_max_vfs(esw->dev);
}

static inline u16
mlx5_esw_sf_vport_index_to_num(const struct mlx5_eswitch *esw, int idx)
{
        return mlx5_sf_start_function_id(esw->dev) + idx -
               (MLX5_VPORT_PF_PLACEHOLDER + mlx5_core_max_vfs(esw->dev));
}

static inline bool
mlx5_esw_is_sf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
        return mlx5_sf_supported(esw->dev) &&
               vport_num >= mlx5_sf_start_function_id(esw->dev) &&
               (vport_num < (mlx5_sf_start_function_id(esw->dev) +
                             mlx5_sf_max_functions(esw->dev)));
}

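/* Illustrative arithmetic (hypothetical values, assuming
 * MLX5_VPORT_PF_PLACEHOLDER == 1): with mlx5_core_max_vfs() == 4 and
 * mlx5_sf_start_function_id() == 100, the first SF index is 1 + 4 = 5, so
 * SF vport number 102 maps to index 102 - 100 + 5 = 7, and
 * mlx5_esw_sf_vport_index_to_num() maps 7 back to 100 + 7 - 5 = 102.
 */
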
static inline bool mlx5_eswitch_is_funcs_handler(const struct mlx5_core_dev *dev)
{
        return mlx5_core_is_ecpf_esw_manager(dev);
}

static inline int mlx5_eswitch_uplink_idx(struct mlx5_eswitch *esw)
{
        /* The uplink is always the last element of the vports array. */
        return esw->total_vports - 1;
}

static inline int mlx5_eswitch_ecpf_idx(struct mlx5_eswitch *esw)
{
        return esw->total_vports - 2;
}

static inline int mlx5_eswitch_vport_num_to_index(struct mlx5_eswitch *esw,
                                                  u16 vport_num)
{
        if (vport_num == MLX5_VPORT_ECPF) {
                if (!mlx5_ecpf_vport_exists(esw->dev))
                        esw_warn(esw->dev, "ECPF vport doesn't exist!\n");
                return mlx5_eswitch_ecpf_idx(esw);
        }

        if (vport_num == MLX5_VPORT_UPLINK)
                return mlx5_eswitch_uplink_idx(esw);

        if (mlx5_esw_is_sf_vport(esw, vport_num))
                return mlx5_esw_sf_vport_num_to_index(esw, vport_num);

        /* PF and VF vport numbers map directly to indices 0..max_vfs. */
        return vport_num;
}

static inline u16 mlx5_eswitch_index_to_vport_num(struct mlx5_eswitch *esw,
                                                  int index)
{
        if (index == mlx5_eswitch_ecpf_idx(esw) &&
            mlx5_ecpf_vport_exists(esw->dev))
                return MLX5_VPORT_ECPF;

        if (index == mlx5_eswitch_uplink_idx(esw))
                return MLX5_VPORT_UPLINK;

        /* SF vport indices come after the VFs and before the ECPF. */
        if (mlx5_sf_supported(esw->dev) &&
            index > mlx5_core_max_vfs(esw->dev))
                return mlx5_esw_sf_vport_index_to_num(esw, index);

        /* PF and VF vport numbers map directly to indices 0..max_vfs. */
        return index;
}

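/* Illustrative layout (on a device with an ECPF vport): index 0 is the PF,
 * indices 1..max_vfs are the VFs, SF indices follow the VFs, the ECPF sits
 * at total_vports - 2 and the uplink is always at total_vports - 1, e.g.:
 *
 *        mlx5_eswitch_vport_num_to_index(esw, MLX5_VPORT_UPLINK)
 *                == esw->total_vports - 1;
 */
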
static inline unsigned int
mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
                                     u16 vport_num)
{
        return (MLX5_CAP_GEN(dev, vhca_id) << 16) | vport_num;
}

static inline u16
mlx5_esw_devlink_port_index_to_vport_num(unsigned int dl_port_index)
{
        return dl_port_index & 0xffff;
}

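/* Illustrative arithmetic (hypothetical values): with vhca_id == 2 and
 * vport_num == 5, the devlink port index is (2 << 16) | 5 = 0x20005, and
 * mlx5_esw_devlink_port_index_to_vport_num(0x20005) recovers 5 by masking
 * the low 16 bits.
 */
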
/* TODO: This mlx5e_tc function shouldn't be called by eswitch */
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);

/* The vport getter/iterator are only valid after esw->total_vports
 * and vport->vport are initialized in mlx5_eswitch_init.
 */
#define mlx5_esw_for_all_vports(esw, i, vport)          \
        for ((i) = MLX5_VPORT_PF;                       \
             (vport) = &(esw)->vports[i],               \
             (i) < (esw)->total_vports; (i)++)

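/* Illustrative sketch of the iterator shape (do_something() is
 * hypothetical):
 *
 *        struct mlx5_vport *vport;
 *        int i;
 *
 *        mlx5_esw_for_all_vports(esw, i, vport) {
 *                if (vport->enabled)
 *                        do_something(vport);
 *        }
 */
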
#define mlx5_esw_for_all_vports_reverse(esw, i, vport)  \
        for ((i) = (esw)->total_vports - 1;             \
             (vport) = &(esw)->vports[i],               \
             (i) >= MLX5_VPORT_PF; (i)--)

#define mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs) \
        for ((i) = MLX5_VPORT_FIRST_VF;                 \
             (vport) = &(esw)->vports[(i)],             \
             (i) <= (nvfs); (i)++)

#define mlx5_esw_for_each_vf_vport_reverse(esw, i, vport, nvfs) \
        for ((i) = (nvfs);                                      \
             (vport) = &(esw)->vports[(i)],                     \
             (i) >= MLX5_VPORT_FIRST_VF; (i)--)

/* The rep getter/iterator are only valid after esw->total_vports
 * and vport->vport are initialized in mlx5_eswitch_init.
 */
#define mlx5_esw_for_all_reps(esw, i, rep)                      \
        for ((i) = MLX5_VPORT_PF;                               \
             (rep) = &(esw)->offloads.vport_reps[i],            \
             (i) < (esw)->total_vports; (i)++)

#define mlx5_esw_for_each_vf_rep(esw, i, rep, nvfs)             \
        for ((i) = MLX5_VPORT_FIRST_VF;                         \
             (rep) = &(esw)->offloads.vport_reps[i],            \
             (i) <= (nvfs); (i)++)

#define mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvfs)     \
        for ((i) = (nvfs);                                      \
             (rep) = &(esw)->offloads.vport_reps[i],            \
             (i) >= MLX5_VPORT_FIRST_VF; (i)--)

#define mlx5_esw_for_each_vf_vport_num(esw, vport, nvfs)        \
        for ((vport) = MLX5_VPORT_FIRST_VF; (vport) <= (nvfs); (vport)++)

#define mlx5_esw_for_each_vf_vport_num_reverse(esw, vport, nvfs)        \
        for ((vport) = (nvfs); (vport) >= MLX5_VPORT_FIRST_VF; (vport)--)

/* Includes host PF (vport 0) if it's not esw manager. */
#define mlx5_esw_for_each_host_func_rep(esw, i, rep, nvfs)      \
        for ((i) = (esw)->first_host_vport;                     \
             (rep) = &(esw)->offloads.vport_reps[i],            \
             (i) <= (nvfs); (i)++)

#define mlx5_esw_for_each_host_func_rep_reverse(esw, i, rep, nvfs)      \
        for ((i) = (nvfs);                                              \
             (rep) = &(esw)->offloads.vport_reps[i],                    \
             (i) >= (esw)->first_host_vport; (i)--)

#define mlx5_esw_for_each_host_func_vport(esw, vport, nvfs)     \
        for ((vport) = (esw)->first_host_vport;                 \
             (vport) <= (nvfs); (vport)++)

#define mlx5_esw_for_each_host_func_vport_reverse(esw, vport, nvfs)     \
        for ((vport) = (nvfs);                                          \
             (vport) >= (esw)->first_host_vport; (vport)--)

#define mlx5_esw_for_each_sf_rep(esw, i, rep)           \
        for ((i) = mlx5_esw_sf_start_idx(esw);          \
             (rep) = &(esw)->offloads.vport_reps[(i)],  \
             (i) < mlx5_esw_sf_end_idx(esw); (i++))

struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink);
struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);

bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);

int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
                                 enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);

int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
                                     struct mlx5_vport *vport);
void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
                                      struct mlx5_vport *vport);

int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw);
void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw);

struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag);
u32
esw_get_max_restore_tag(struct mlx5_eswitch *esw);

int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num);
void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
                            enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
                                enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs);

int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);
#else  /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int  mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
        return ERR_PTR(-EOPNOTSUPP);
}

static inline struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
        return ERR_PTR(-EOPNOTSUPP);
}
#endif /* CONFIG_MLX5_ESWITCH */

#endif /* __MLX5_ESWITCH_H__ */