]>
Commit | Line | Data |
---|---|---|
073bb189 SM |
1 | /* |
2 | * Copyright (c) 2015, Mellanox Technologies. All rights reserved. | |
3 | * | |
4 | * This software is available to you under a choice of one of two | |
5 | * licenses. You may choose to be licensed under the terms of the GNU | |
6 | * General Public License (GPL) Version 2, available from the file | |
7 | * COPYING in the main directory of this source tree, or the | |
8 | * OpenIB.org BSD license below: | |
9 | * | |
10 | * Redistribution and use in source and binary forms, with or | |
11 | * without modification, are permitted provided that the following | |
12 | * conditions are met: | |
13 | * | |
14 | * - Redistributions of source code must retain the above | |
15 | * copyright notice, this list of conditions and the following | |
16 | * disclaimer. | |
17 | * | |
18 | * - Redistributions in binary form must reproduce the above | |
19 | * copyright notice, this list of conditions and the following | |
20 | * disclaimer in the documentation and/or other materials | |
21 | * provided with the distribution. | |
22 | * | |
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
30 | * SOFTWARE. | |
31 | */ | |
32 | ||
33 | #include <linux/etherdevice.h> | |
34 | #include <linux/mlx5/driver.h> | |
35 | #include <linux/mlx5/mlx5_ifc.h> | |
36 | #include <linux/mlx5/vport.h> | |
86d722ad | 37 | #include <linux/mlx5/fs.h> |
7c9f131f | 38 | #include <linux/mlx5/mpfs.h> |
ea651a86 | 39 | #include "esw/acl/lgcy.h" |
b55b3538 | 40 | #include "esw/legacy.h" |
2d116e3e | 41 | #include "esw/qos.h" |
073bb189 | 42 | #include "mlx5_core.h" |
6933a937 | 43 | #include "lib/eq.h" |
073bb189 | 44 | #include "eswitch.h" |
b8a0dbe3 | 45 | #include "fs_core.h" |
87dac697 | 46 | #include "devlink.h" |
a3888f33 | 47 | #include "ecpf.h" |
b2fdf3d0 | 48 | #include "en/mod_hdr.h" |
073bb189 | 49 | |
/* Pending reconciliation action on a vport UC/MC address-list entry,
 * consumed by esw_apply_vport_addr_list().
 */
enum {
	MLX5_ACTION_NONE = 0,
	MLX5_ACTION_ADD = 1,
	MLX5_ACTION_DEL = 2,
};
/* Vport UC/MC hash node */
struct vport_addr {
	struct l2addr_node node;	/* hash linkage; node.addr holds the MAC */
	u8 action;			/* MLX5_ACTION_{NONE,ADD,DEL} */
	u16 vport;			/* vport number owning this address */
	struct mlx5_flow_handle *flow_rule;	/* FDB rule forwarding this MAC to the vport */
	bool mpfs; /* UC MAC was added to MPFs */
	/* A flag indicating that mac was added due to mc promiscuous vport */
	bool mc_promisc;
};
/* Check that the device can host an eswitch: it must be an Ethernet port
 * and an E-Switch manager. Returns 0 on success, -EOPNOTSUPP otherwise.
 */
static int mlx5_eswitch_check(const struct mlx5_core_dev *dev)
{
	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;

	if (!MLX5_ESWITCH_MANAGER(dev))
		return -EOPNOTSUPP;

	return 0;
}
/* Resolve the eswitch instance behind a devlink handle.
 * Returns an ERR_PTR when the device cannot manage an eswitch.
 */
struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	int err;

	err = mlx5_eswitch_check(dev);
	if (err)
		return ERR_PTR(err);

	return dev->priv.eswitch;
}
/* Look up a vport by number in the eswitch vport xarray.
 * Returns ERR_PTR(-EPERM) when there is no eswitch / no vport group manager
 * capability, ERR_PTR(-EINVAL) when the vport number is unknown.
 */
struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_vport *vport;

	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
		return ERR_PTR(-EPERM);

	vport = xa_load(&esw->vports, vport_num);
	if (!vport) {
		esw_debug(esw->dev, "vport out of range: num(0x%x)\n", vport_num);
		return ERR_PTR(-EINVAL);
	}
	return vport;
}
/* (Re)arm change-event reporting on a vport's NIC vport context.
 * events_mask selects which of UC/MC/promisc address-change events the
 * firmware should deliver; the change_event field_select bit and
 * arm_change_event are always set so the event fires once and must be
 * re-armed after handling.
 */
static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
					u32 events_mask)
{
	u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {};
	void *nic_vport_ctx;

	MLX5_SET(modify_nic_vport_context_in, in,
		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);

	if (events_mask & MLX5_VPORT_UC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_uc_address_change, 1);
	if (events_mask & MLX5_VPORT_MC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_mc_address_change, 1);
	if (events_mask & MLX5_VPORT_PROMISC_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_promisc_change, 1);

	return mlx5_cmd_exec_in(dev, modify_nic_vport_context, in);
}
/* E-Switch vport context HW commands */

/* Issue MODIFY_ESW_VPORT_CONTEXT for @vport. The caller pre-fills @in with
 * the esw_vport_context fields and field_select bits; this helper only sets
 * the opcode, vport number and other_vport flag before executing.
 */
int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
					  bool other_vport, void *in)
{
	MLX5_SET(modify_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_esw_vport_context_in, in, other_vport, other_vport);
	return mlx5_cmd_exec_in(dev, modify_esw_vport_context, in);
}
/* Program CVLAN strip/insert behaviour on a vport's eswitch context.
 * set_flags is a bitmask of SET_VLAN_STRIP / SET_VLAN_INSERT; when insert
 * is requested the given vlan/qos are pushed only on packets that carry no
 * VLAN tag. Both field_select bits are always written, so a zero set_flags
 * clears strip and insert. Requires the vport_cvlan_strip and
 * vport_cvlan_insert_if_not_exist ESW capabilities.
 */
static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
				  u16 vlan, u8 qos, u8 set_flags)
{
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};

	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
		return -EOPNOTSUPP;

	esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
		  vport, vlan, qos, set_flags);

	if (set_flags & SET_VLAN_STRIP)
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_strip, 1);

	if (set_flags & SET_VLAN_INSERT) {
		/* insert only if no vlan in packet */
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_insert, 1);

		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_pcp, qos);
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_id, vlan);
	}

	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_strip, 1);
	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_insert, 1);

	return mlx5_eswitch_modify_esw_vport_context(dev, vport, true, in);
}
/* E-Switch FDB */

/* Install a legacy-mode FDB rule forwarding to @vport.
 * - mac_v/mac_c are DMAC value/mask; an all-zero mask skips the outer-header
 *   match entirely (used by promisc/allmulti rules).
 * - rx_rule additionally matches source_port == uplink, i.e. traffic
 *   arriving from the wire.
 * Returns the flow handle, or NULL on failure (errors are logged, not
 * propagated).
 */
static struct mlx5_flow_handle *
__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u16 vport, bool rx_rule,
			 u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
{
	int match_header = (is_zero_ether_addr(mac_c) ? 0 :
			    MLX5_MATCH_OUTER_HEADERS);
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_spec *spec;
	void *mv_misc = NULL;
	void *mc_misc = NULL;
	u8 *dmac_v = NULL;
	u8 *dmac_c = NULL;

	if (rx_rule)
		match_header |= MLX5_MATCH_MISC_PARAMETERS;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return NULL;

	dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			      outer_headers.dmac_47_16);
	dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			      outer_headers.dmac_47_16);

	if (match_header & MLX5_MATCH_OUTER_HEADERS) {
		ether_addr_copy(dmac_v, mac_v);
		ether_addr_copy(dmac_c, mac_c);
	}

	if (match_header & MLX5_MATCH_MISC_PARAMETERS) {
		mv_misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       misc_parameters);
		mc_misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       misc_parameters);
		MLX5_SET(fte_match_set_misc, mv_misc, source_port, MLX5_VPORT_UPLINK);
		MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;

	esw_debug(esw->dev,
		  "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
		  dmac_v, dmac_c, vport);
	spec->match_criteria_enable = match_header;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule =
		mlx5_add_flow_rules(esw->fdb_table.legacy.fdb, spec,
				    &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev,
			 "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
			 dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
		flow_rule = NULL;
	}

	kvfree(spec);
	return flow_rule;
}
74491de9 | 245 | static struct mlx5_flow_handle * |
7e4c4330 | 246 | esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u16 vport) |
78a9199b MHY |
247 | { |
248 | u8 mac_c[ETH_ALEN]; | |
249 | ||
250 | eth_broadcast_addr(mac_c); | |
a35f71f2 MHY |
251 | return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac); |
252 | } | |
253 | ||
74491de9 | 254 | static struct mlx5_flow_handle * |
7e4c4330 | 255 | esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u16 vport) |
a35f71f2 MHY |
256 | { |
257 | u8 mac_c[ETH_ALEN]; | |
258 | u8 mac_v[ETH_ALEN]; | |
259 | ||
260 | eth_zero_addr(mac_c); | |
261 | eth_zero_addr(mac_v); | |
262 | mac_c[0] = 0x01; | |
263 | mac_v[0] = 0x01; | |
264 | return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v); | |
265 | } | |
266 | ||
74491de9 | 267 | static struct mlx5_flow_handle * |
7e4c4330 | 268 | esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u16 vport) |
a35f71f2 MHY |
269 | { |
270 | u8 mac_c[ETH_ALEN]; | |
271 | u8 mac_v[ETH_ALEN]; | |
272 | ||
273 | eth_zero_addr(mac_c); | |
274 | eth_zero_addr(mac_v); | |
275 | return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v); | |
78a9199b MHY |
276 | } |
277 | ||
/* E-Switch vport UC/MC lists management */

/* Callback applied to each vport_addr entry when reconciling an address
 * list (esw_add_uc_addr/esw_del_uc_addr or esw_add_mc_addr/esw_del_mc_addr).
 */
typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
				 struct vport_addr *vaddr);
/* Add a unicast MAC for a vport: register it in the L2 (MPFS) table, then,
 * in legacy mode with an FDB present, install a forwarding rule to the
 * vport. MPFS registration is skipped for manager vports (their netdev
 * already did it).
 */
static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;
	int err;

	/* Skip mlx5_mpfs_add_mac for eswitch_managers,
	 * it is already done by its netdev in mlx5e_execute_l2_action
	 */
	if (mlx5_esw_is_manager_vport(esw, vport))
		goto fdb_add;

	err = mlx5_mpfs_add_mac(esw->dev, mac);
	if (err) {
		esw_warn(esw->dev,
			 "Failed to add L2 table mac(%pM) for vport(0x%x), err(%d)\n",
			 mac, vport, err);
		return err;
	}
	vaddr->mpfs = true;	/* remember for symmetric removal in esw_del_uc_addr */

fdb_add:
	/* SRIOV is enabled: Forward UC MAC to vport */
	if (esw->fdb_table.legacy.fdb && esw->mode == MLX5_ESWITCH_LEGACY)
		vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);

	esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n",
		  vport, mac, vaddr->flow_rule);

	return 0;
}
/* Remove a unicast MAC for a vport: drop it from the L2 (MPFS) table when
 * this driver added it there, then delete the FDB forwarding rule if one
 * was installed. MPFS errors are logged but do not abort the FDB cleanup.
 */
static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;
	int err = 0;

	/* Skip mlx5_mpfs_del_mac for eswitch managers,
	 * it is already done by its netdev in mlx5e_execute_l2_action
	 */
	if (!vaddr->mpfs || mlx5_esw_is_manager_vport(esw, vport))
		goto fdb_del;

	err = mlx5_mpfs_del_mac(esw->dev, mac);
	if (err)
		esw_warn(esw->dev,
			 "Failed to del L2 table mac(%pM) for vport(%d), err(%d)\n",
			 mac, vport, err);
	vaddr->mpfs = false;

fdb_del:
	if (vaddr->flow_rule)
		mlx5_del_flow_rules(vaddr->flow_rule);
	vaddr->flow_rule = NULL;

	return 0;
}
/* Propagate a multicast MAC add/delete to every vport that is currently in
 * allmulti mode (has an allmulti_rule), except the vport the address
 * originated from. Entries created here are flagged mc_promisc so they are
 * not refcounted against the shared esw_mc_addr.
 */
static void update_allmulti_vports(struct mlx5_eswitch *esw,
				   struct vport_addr *vaddr,
				   struct esw_mc_addr *esw_mc)
{
	u8 *mac = vaddr->node.addr;
	struct mlx5_vport *vport;
	unsigned long i;
	u16 vport_num;

	mlx5_esw_for_each_vport(esw, i, vport) {
		struct hlist_head *vport_hash = vport->mc_list;
		struct vport_addr *iter_vaddr =
					l2addr_hash_find(vport_hash,
							 mac,
							 struct vport_addr);
		vport_num = vport->vport;
		/* Only vports in allmulti mode mirror foreign MC addresses */
		if (IS_ERR_OR_NULL(vport->allmulti_rule) ||
		    vaddr->vport == vport_num)
			continue;
		switch (vaddr->action) {
		case MLX5_ACTION_ADD:
			if (iter_vaddr)
				continue;
			iter_vaddr = l2addr_hash_add(vport_hash, mac,
						     struct vport_addr,
						     GFP_KERNEL);
			if (!iter_vaddr) {
				esw_warn(esw->dev,
					 "ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n",
					 mac, vport_num);
				continue;
			}
			iter_vaddr->vport = vport_num;
			iter_vaddr->flow_rule =
					esw_fdb_set_vport_rule(esw,
							       mac,
							       vport_num);
			iter_vaddr->mc_promisc = true;
			break;
		case MLX5_ACTION_DEL:
			if (!iter_vaddr)
				continue;
			mlx5_del_flow_rules(iter_vaddr->flow_rule);
			l2addr_hash_del(iter_vaddr);
			break;
		}
	}
}
/* Add a multicast MAC for a vport. A single esw_mc_addr per MAC is shared
 * across vports (refcounted) and carries the MC->uplink rule; each vport
 * additionally gets its own MC->vport forwarding rule. Entries that exist
 * only because of allmulti (mc_promisc) do not take a refcount.
 */
static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;

	if (!esw->fdb_table.legacy.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (esw_mc)
		goto add;

	esw_mc = l2addr_hash_add(hash, mac, struct esw_mc_addr, GFP_KERNEL);
	if (!esw_mc)
		return -ENOMEM;

	esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
		esw_fdb_set_vport_rule(esw, mac, MLX5_VPORT_UPLINK);

	/* Add this multicast mac to all the mc promiscuous vports */
	update_allmulti_vports(esw, vaddr, esw_mc);

add:
	/* If the multicast mac is added as a result of mc promiscuous vport,
	 * don't increment the multicast ref count
	 */
	if (!vaddr->mc_promisc)
		esw_mc->refcnt++;

	/* Forward MC MAC to vport */
	vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
	esw_debug(esw->dev,
		  "\tADDED MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule,
		  esw_mc->refcnt, esw_mc->uplink_rule);
	return 0;
}
/* Remove a multicast MAC for a vport: delete the per-vport forwarding rule,
 * and when the shared esw_mc_addr refcount drops to zero also remove the
 * allmulti mirrors, the MC->uplink rule and the hash entry itself.
 * mc_promisc entries never held a refcount, so they don't decrement it.
 */
static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;

	if (!esw->fdb_table.legacy.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (!esw_mc) {
		esw_warn(esw->dev,
			 "Failed to find eswitch MC addr for MAC(%pM) vport(%d)",
			 mac, vport);
		return -EINVAL;
	}
	esw_debug(esw->dev,
		  "\tDELETE MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule, esw_mc->refcnt,
		  esw_mc->uplink_rule);

	if (vaddr->flow_rule)
		mlx5_del_flow_rules(vaddr->flow_rule);
	vaddr->flow_rule = NULL;

	/* If the multicast mac is added as a result of mc promiscuous vport,
	 * don't decrement the multicast ref count.
	 */
	if (vaddr->mc_promisc || (--esw_mc->refcnt > 0))
		return 0;

	/* Remove this multicast mac from all the mc promiscuous vports */
	update_allmulti_vports(esw, vaddr, esw_mc);

	if (esw_mc->uplink_rule)
		mlx5_del_flow_rules(esw_mc->uplink_rule);

	l2addr_hash_del(esw_mc);
	return 0;
}
/* Apply vport UC/MC list to HW l2 table and FDB table */

/* Walk the vport's UC or MC hash and execute each entry's pending action:
 * ADD entries are programmed into HW and reset to NONE, DEL entries are
 * removed from HW and freed from the hash.
 */
static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport, int list_type)
{
	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
	vport_addr_action vport_addr_add;
	vport_addr_action vport_addr_del;
	struct vport_addr *addr;
	struct l2addr_node *node;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int hi;

	vport_addr_add = is_uc ? esw_add_uc_addr :
				 esw_add_mc_addr;
	vport_addr_del = is_uc ? esw_del_uc_addr :
				 esw_del_mc_addr;

	hash = is_uc ? vport->uc_list : vport->mc_list;
	for_each_l2hash_node(node, tmp, hash, hi) {
		addr = container_of(node, struct vport_addr, node);
		switch (addr->action) {
		case MLX5_ACTION_ADD:
			vport_addr_add(esw, addr);
			addr->action = MLX5_ACTION_NONE;
			break;
		case MLX5_ACTION_DEL:
			vport_addr_del(esw, addr);
			l2addr_hash_del(addr);
			break;
		}
	}
}
/* Sync vport UC/MC list from vport context */

/* Mark-and-sweep reconciliation: first mark every cached entry for deletion,
 * then query the vport's current MAC list from firmware and flip entries
 * that are still present back to NONE (or create them as ADD). Entries left
 * marked DEL are stale and will be removed by esw_apply_vport_addr_list().
 * A MAC previously mirrored for allmulti (mc_promisc) that now appears in
 * the vport's own list is converted to a regular entry and takes a refcount.
 */
static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
				       struct mlx5_vport *vport, int list_type)
{
	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
	u8 (*mac_list)[ETH_ALEN];
	struct l2addr_node *node;
	struct vport_addr *addr;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int size;
	int err;
	int hi;
	int i;

	size = is_uc ? MLX5_MAX_UC_PER_VPORT(esw->dev) :
		       MLX5_MAX_MC_PER_VPORT(esw->dev);

	mac_list = kcalloc(size, ETH_ALEN, GFP_KERNEL);
	if (!mac_list)
		return;

	hash = is_uc ? vport->uc_list : vport->mc_list;

	for_each_l2hash_node(node, tmp, hash, hi) {
		addr = container_of(node, struct vport_addr, node);
		addr->action = MLX5_ACTION_DEL;
	}

	if (!vport->enabled)
		goto out;

	err = mlx5_query_nic_vport_mac_list(esw->dev, vport->vport, list_type,
					    mac_list, &size);
	if (err)
		goto out;
	esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
		  vport->vport, is_uc ? "UC" : "MC", size);

	for (i = 0; i < size; i++) {
		if (is_uc && !is_valid_ether_addr(mac_list[i]))
			continue;

		if (!is_uc && !is_multicast_ether_addr(mac_list[i]))
			continue;

		addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr);
		if (addr) {
			addr->action = MLX5_ACTION_NONE;
			/* If this mac was previously added because of allmulti
			 * promiscuous rx mode, its now converted to be original
			 * vport mac.
			 */
			if (addr->mc_promisc) {
				struct esw_mc_addr *esw_mc =
					l2addr_hash_find(esw->mc_table,
							 mac_list[i],
							 struct esw_mc_addr);
				if (!esw_mc) {
					esw_warn(esw->dev,
						 "Failed to MAC(%pM) in mcast DB\n",
						 mac_list[i]);
					continue;
				}
				esw_mc->refcnt++;
				addr->mc_promisc = false;
			}
			continue;
		}

		addr = l2addr_hash_add(hash, mac_list[i], struct vport_addr,
				       GFP_KERNEL);
		if (!addr) {
			esw_warn(esw->dev,
				 "Failed to add MAC(%pM) to vport[%d] DB\n",
				 mac_list[i], vport->vport);
			continue;
		}
		addr->vport = vport->vport;
		addr->action = MLX5_ACTION_ADD;
	}
out:
	kfree(mac_list);
}
/* Sync vport UC/MC list from vport context
 * Must be called after esw_update_vport_addr_list
 */
static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw,
					struct mlx5_vport *vport)
{
	struct l2addr_node *node;
	struct vport_addr *addr;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int hi;

	hash = vport->mc_list;

	/* Mirror every MAC from the eswitch-wide MC table into this
	 * (mc-promiscuous) vport's list; existing entries scheduled for
	 * deletion are revived instead of re-added.
	 */
	for_each_l2hash_node(node, tmp, esw->mc_table, hi) {
		u8 *mac = node->addr;

		addr = l2addr_hash_find(hash, mac, struct vport_addr);
		if (addr) {
			if (addr->action == MLX5_ACTION_DEL)
				addr->action = MLX5_ACTION_NONE;
			continue;
		}
		addr = l2addr_hash_add(hash, mac, struct vport_addr,
				       GFP_KERNEL);
		if (!addr) {
			esw_warn(esw->dev,
				 "Failed to add allmulti MAC(%pM) to vport[%d] DB\n",
				 mac, vport->vport);
			continue;
		}
		addr->vport = vport->vport;
		addr->action = MLX5_ACTION_ADD;
		addr->mc_promisc = true;	/* added via allmulti, not vport's own list */
	}
}
/* Apply vport rx mode to HW FDB table */

/* Install or remove the vport's allmulti and promisc FDB rules to match the
 * requested rx mode. The shared allmulti uplink rule is refcounted across
 * all vports and torn down only when the last allmulti vport leaves.
 * IS_ERR_OR_NULL(rule) != requested acts as a "no change needed" check.
 */
static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport,
				    bool promisc, bool mc_promisc)
{
	struct esw_mc_addr *allmulti_addr = &esw->mc_promisc;

	if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc)
		goto promisc;

	if (mc_promisc) {
		vport->allmulti_rule =
			esw_fdb_set_vport_allmulti_rule(esw, vport->vport);
		if (!allmulti_addr->uplink_rule)
			allmulti_addr->uplink_rule =
				esw_fdb_set_vport_allmulti_rule(esw,
								MLX5_VPORT_UPLINK);
		allmulti_addr->refcnt++;
	} else if (vport->allmulti_rule) {
		mlx5_del_flow_rules(vport->allmulti_rule);
		vport->allmulti_rule = NULL;

		if (--allmulti_addr->refcnt > 0)
			goto promisc;

		if (allmulti_addr->uplink_rule)
			mlx5_del_flow_rules(allmulti_addr->uplink_rule);
		allmulti_addr->uplink_rule = NULL;
	}

promisc:
	if (IS_ERR_OR_NULL(vport->promisc_rule) != promisc)
		return;

	if (promisc) {
		vport->promisc_rule =
			esw_fdb_set_vport_promisc_rule(esw, vport->vport);
	} else if (vport->promisc_rule) {
		mlx5_del_flow_rules(vport->promisc_rule);
		vport->promisc_rule = NULL;
	}
}
/* Sync vport rx mode from vport context */

/* Query the vport's promiscuous settings from firmware and apply them to
 * the FDB. Untrusted or disabled vports are forced out of all promiscuous
 * modes regardless of what they requested.
 */
static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport)
{
	int promisc_all = 0;
	int promisc_uc = 0;
	int promisc_mc = 0;
	int err;

	err = mlx5_query_nic_vport_promisc(esw->dev,
					   vport->vport,
					   &promisc_uc,
					   &promisc_mc,
					   &promisc_all);
	if (err)
		return;
	esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
		  vport->vport, promisc_all, promisc_mc);

	if (!vport->info.trusted || !vport->enabled) {
		promisc_uc = 0;
		promisc_mc = 0;
		promisc_all = 0;
	}

	esw_apply_vport_rx_mode(esw, vport, promisc_all,
				(promisc_all || promisc_mc));
}
/* Handle a vport-context-changed event: resync the UC/MC address lists and
 * rx mode that this vport subscribed to (enabled_events), apply the result
 * to HW, then re-arm the firmware event. Caller must hold esw->state_lock.
 */
void esw_vport_change_handle_locked(struct mlx5_vport *vport)
{
	struct mlx5_core_dev *dev = vport->dev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	u8 mac[ETH_ALEN];

	mlx5_query_nic_vport_mac_address(dev, vport->vport, true, mac);
	esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
		  vport->vport, mac);

	if (vport->enabled_events & MLX5_VPORT_UC_ADDR_CHANGE) {
		esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
		esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
	}

	if (vport->enabled_events & MLX5_VPORT_MC_ADDR_CHANGE)
		esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);

	if (vport->enabled_events & MLX5_VPORT_PROMISC_CHANGE) {
		esw_update_vport_rx_mode(esw, vport);
		if (!IS_ERR_OR_NULL(vport->allmulti_rule))
			esw_update_vport_mc_promisc(esw, vport);
	}

	if (vport->enabled_events & (MLX5_VPORT_PROMISC_CHANGE | MLX5_VPORT_MC_ADDR_CHANGE))
		esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);

	esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
	/* Events are one-shot: re-arm only while the vport stays enabled */
	if (vport->enabled)
		arm_vport_context_events_cmd(dev, vport->vport,
					     vport->enabled_events);
}
/* Workqueue entry point for vport change events: take the eswitch state
 * lock and run the locked handler.
 */
static void esw_vport_change_handler(struct work_struct *work)
{
	struct mlx5_vport *vport =
		container_of(work, struct mlx5_vport, vport_change_handler);
	struct mlx5_eswitch *esw = vport->dev->priv.eswitch;

	mutex_lock(&esw->state_lock);
	esw_vport_change_handle_locked(vport);
	mutex_unlock(&esw->state_lock);
}
fa997825 | 744 | static void node_guid_gen_from_mac(u64 *node_guid, const u8 *mac) |
1ab2068a MHY |
745 | { |
746 | ((u8 *)node_guid)[7] = mac[0]; | |
747 | ((u8 *)node_guid)[6] = mac[1]; | |
748 | ((u8 *)node_guid)[5] = mac[2]; | |
749 | ((u8 *)node_guid)[4] = 0xff; | |
750 | ((u8 *)node_guid)[3] = 0xfe; | |
751 | ((u8 *)node_guid)[2] = mac[3]; | |
752 | ((u8 *)node_guid)[1] = mac[4]; | |
753 | ((u8 *)node_guid)[0] = mac[5]; | |
754 | } | |
755 | ||
f5d0c01d PP |
756 | static int esw_vport_setup_acl(struct mlx5_eswitch *esw, |
757 | struct mlx5_vport *vport) | |
b8a0dbe3 | 758 | { |
f5d0c01d | 759 | if (esw->mode == MLX5_ESWITCH_LEGACY) |
b55b3538 | 760 | return esw_legacy_vport_acl_setup(esw, vport); |
748da30b VP |
761 | else |
762 | return esw_vport_create_offloads_acl_tables(esw, vport); | |
f5d0c01d | 763 | } |
b8a0dbe3 | 764 | |
f5d0c01d PP |
765 | static void esw_vport_cleanup_acl(struct mlx5_eswitch *esw, |
766 | struct mlx5_vport *vport) | |
767 | { | |
768 | if (esw->mode == MLX5_ESWITCH_LEGACY) | |
b55b3538 | 769 | esw_legacy_vport_acl_cleanup(esw, vport); |
748da30b VP |
770 | else |
771 | esw_vport_destroy_offloads_acl_tables(esw, vport); | |
b8a0dbe3 EE |
772 | } |
773 | ||
/* Bring up HW state for a vport: ACLs, QoS rate-limiter attachment, admin
 * link state, and — for non-manager, non-PF vports — restore the saved
 * MAC/node GUID and CVLAN configuration from vport->info.
 */
static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	u16 vport_num = vport->vport;
	int flags;
	int err;

	err = esw_vport_setup_acl(esw, vport);
	if (err)
		return err;

	/* Attach vport to the eswitch rate limiter */
	mlx5_esw_qos_vport_enable(esw, vport, vport->qos.max_rate, vport->qos.bw_share);

	/* Manager vports configure themselves via their own netdev */
	if (mlx5_esw_is_manager_vport(esw, vport_num))
		return 0;

	mlx5_modify_vport_admin_state(esw->dev,
				      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
				      vport_num, 1,
				      vport->info.link_state);

	/* Host PF has its own mac/guid. */
	if (vport_num) {
		mlx5_modify_nic_vport_mac_address(esw->dev, vport_num,
						  vport->info.mac);
		mlx5_modify_nic_vport_node_guid(esw->dev, vport_num,
						vport->info.node_guid);
	}

	flags = (vport->info.vlan || vport->info.qos) ?
		SET_VLAN_STRIP | SET_VLAN_INSERT : 0;
	modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan,
			       vport->info.qos, flags);

	return 0;
}
/* Don't cleanup vport->info, it's needed to restore vport configuration */
static void esw_vport_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	u16 vport_num = vport->vport;

	/* Bring the link down for non-manager vports before releasing
	 * QoS and ACL resources.
	 */
	if (!mlx5_esw_is_manager_vport(esw, vport_num))
		mlx5_modify_vport_admin_state(esw->dev,
					      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
					      vport_num, 1,
					      MLX5_VPORT_ADMIN_STATE_DOWN);

	mlx5_esw_qos_vport_disable(esw, vport);
	esw_vport_cleanup_acl(esw, vport);
}
825 | ||
d970812b PP |
/* Enable a single vport: set up its HW state, register it for the requested
 * change events and publish its vhca-id mapping.
 * @enabled_events: bitmap of vport events the change handler should act on.
 * Serialized by esw->state_lock. Returns 0 or a negative errno.
 */
int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
			  enum mlx5_eswitch_vport_event enabled_events)
{
	struct mlx5_vport *vport;
	int ret;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport))
		return PTR_ERR(vport);

	mutex_lock(&esw->state_lock);
	WARN_ON(vport->enabled);

	esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);

	ret = esw_vport_setup(esw, vport);
	if (ret)
		goto done;

	/* Sync with current vport context */
	vport->enabled_events = enabled_events;
	vport->enabled = true;

	/* Esw manager is trusted by default. Host PF (vport 0) is trusted as well
	 * in smartNIC as it's a vport group manager.
	 */
	if (mlx5_esw_is_manager_vport(esw, vport_num) ||
	    (!vport_num && mlx5_core_is_ecpf(esw->dev)))
		vport->info.trusted = true;

	/* Record the vport_num -> vhca_id mapping when FW supports it. */
	if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
	    MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
		ret = mlx5_esw_vport_vhca_id_set(esw, vport_num);
		if (ret)
			goto err_vhca_mapping;
	}

	/* External controller host PF has factory programmed MAC.
	 * Read it from the device.
	 */
	if (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF)
		mlx5_query_nic_vport_mac_address(esw->dev, vport_num, true, vport->info.mac);

	esw_vport_change_handle_locked(vport);

	esw->enabled_vports++;
	esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
done:
	mutex_unlock(&esw->state_lock);
	return ret;

err_vhca_mapping:
	/* Undo esw_vport_setup(); vport->info is preserved (see
	 * esw_vport_cleanup()).
	 */
	esw_vport_cleanup(esw, vport);
	mutex_unlock(&esw->state_lock);
	return ret;
}
882 | ||
d970812b | 883 | void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num) |
073bb189 | 884 | { |
c2d7712c BW |
885 | struct mlx5_vport *vport; |
886 | ||
887 | vport = mlx5_eswitch_get_vport(esw, vport_num); | |
7bef147a SM |
888 | if (IS_ERR(vport)) |
889 | return; | |
073bb189 | 890 | |
77b09430 | 891 | mutex_lock(&esw->state_lock); |
073bb189 | 892 | if (!vport->enabled) |
77b09430 | 893 | goto done; |
073bb189 | 894 | |
81848731 | 895 | esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num); |
073bb189 | 896 | /* Mark this vport as disabled to discard new events */ |
073bb189 | 897 | vport->enabled = false; |
831cae1d | 898 | |
073bb189 SM |
899 | /* Disable events from this vport */ |
900 | arm_vport_context_events_cmd(esw->dev, vport->vport, 0); | |
84ae9c1f VB |
901 | |
902 | if (!mlx5_esw_is_manager_vport(esw, vport->vport) && | |
903 | MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) | |
904 | mlx5_esw_vport_vhca_id_clear(esw, vport_num); | |
905 | ||
586cfa7f MHY |
906 | /* We don't assume VFs will cleanup after themselves. |
907 | * Calling vport change handler while vport is disabled will cleanup | |
908 | * the vport resources. | |
909 | */ | |
1edc57e2 | 910 | esw_vport_change_handle_locked(vport); |
586cfa7f | 911 | vport->enabled_events = 0; |
878a7331 | 912 | esw_vport_cleanup(esw, vport); |
81848731 | 913 | esw->enabled_vports--; |
77b09430 PP |
914 | |
915 | done: | |
dfcb1ed3 | 916 | mutex_unlock(&esw->state_lock); |
073bb189 SM |
917 | } |
918 | ||
6933a937 SM |
919 | static int eswitch_vport_event(struct notifier_block *nb, |
920 | unsigned long type, void *data) | |
921 | { | |
922 | struct mlx5_eswitch *esw = mlx5_nb_cof(nb, struct mlx5_eswitch, nb); | |
923 | struct mlx5_eqe *eqe = data; | |
924 | struct mlx5_vport *vport; | |
925 | u16 vport_num; | |
926 | ||
927 | vport_num = be16_to_cpu(eqe->data.vport_change.vport_num); | |
879c8f84 | 928 | vport = mlx5_eswitch_get_vport(esw, vport_num); |
77b09430 | 929 | if (!IS_ERR(vport)) |
6933a937 | 930 | queue_work(esw->work_queue, &vport->vport_change_handler); |
6933a937 SM |
931 | return NOTIFY_OK; |
932 | } | |
933 | ||
dd28087c PP |
934 | /** |
935 | * mlx5_esw_query_functions - Returns raw output about functions state | |
936 | * @dev: Pointer to device to query | |
937 | * | |
938 | * mlx5_esw_query_functions() allocates and returns functions changed | |
939 | * raw output memory pointer from device on success. Otherwise returns ERR_PTR. | |
940 | * Caller must free the memory using kvfree() when valid pointer is returned. | |
941 | */ | |
942 | const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev) | |
cd56f929 | 943 | { |
dd28087c | 944 | int outlen = MLX5_ST_SZ_BYTES(query_esw_functions_out); |
10ee82ce | 945 | u32 in[MLX5_ST_SZ_DW(query_esw_functions_in)] = {}; |
dd28087c PP |
946 | u32 *out; |
947 | int err; | |
948 | ||
949 | out = kvzalloc(outlen, GFP_KERNEL); | |
950 | if (!out) | |
951 | return ERR_PTR(-ENOMEM); | |
cd56f929 VP |
952 | |
953 | MLX5_SET(query_esw_functions_in, in, opcode, | |
954 | MLX5_CMD_OP_QUERY_ESW_FUNCTIONS); | |
955 | ||
d7f33a45 | 956 | err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); |
dd28087c PP |
957 | if (!err) |
958 | return out; | |
959 | ||
960 | kvfree(out); | |
961 | return ERR_PTR(err); | |
cd56f929 VP |
962 | } |
963 | ||
16fff98a BW |
/* Register the eswitch async event notifiers: vport-change events always,
 * and ESW_FUNCTIONS_CHANGED only in offloads mode on devices where the
 * eswitch manager handles function-change events.
 */
static void mlx5_eswitch_event_handlers_register(struct mlx5_eswitch *esw)
{
	MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
	mlx5_eq_notifier_register(esw->dev, &esw->nb);

	if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) {
		MLX5_NB_INIT(&esw->esw_funcs.nb, mlx5_esw_funcs_changed_handler,
			     ESW_FUNCTIONS_CHANGED);
		mlx5_eq_notifier_register(esw->dev, &esw->esw_funcs.nb);
	}
}
975 | ||
/* Unregister the notifiers registered by mlx5_eswitch_event_handlers_register()
 * (reverse order) and drain any queued vport-change work.
 */
static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw)
{
	if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev))
		mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb);

	mlx5_eq_notifier_unregister(esw->dev, &esw->nb);

	/* Make sure no in-flight vport change handlers touch the eswitch
	 * after this point.
	 */
	flush_workqueue(esw->work_queue);
}
985 | ||
556b9d16 AL |
/* Reset the software qos/info state of all VF vports to defaults
 * (link state AUTO). Used when VF config should not survive disable.
 */
static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;

	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
		memset(&vport->qos, 0, sizeof(vport->qos));
		memset(&vport->info, 0, sizeof(vport->info));
		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
	}
}
997 | ||
073bb189 | 998 | /* Public E-Switch API */ |
23bb50cf BW |
999 | int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num, |
1000 | enum mlx5_eswitch_vport_event enabled_events) | |
c2d7712c BW |
1001 | { |
1002 | int err; | |
1003 | ||
d970812b | 1004 | err = mlx5_esw_vport_enable(esw, vport_num, enabled_events); |
c2d7712c BW |
1005 | if (err) |
1006 | return err; | |
1007 | ||
1008 | err = esw_offloads_load_rep(esw, vport_num); | |
1009 | if (err) | |
1010 | goto err_rep; | |
1011 | ||
1012 | return err; | |
1013 | ||
1014 | err_rep: | |
d970812b | 1015 | mlx5_esw_vport_disable(esw, vport_num); |
c2d7712c BW |
1016 | return err; |
1017 | } | |
1018 | ||
23bb50cf | 1019 | void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num) |
c2d7712c BW |
1020 | { |
1021 | esw_offloads_unload_rep(esw, vport_num); | |
d970812b | 1022 | mlx5_esw_vport_disable(esw, vport_num); |
c2d7712c BW |
1023 | } |
1024 | ||
23bb50cf BW |
1025 | void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs) |
1026 | { | |
47dd7e60 PP |
1027 | struct mlx5_vport *vport; |
1028 | unsigned long i; | |
23bb50cf | 1029 | |
47dd7e60 PP |
1030 | mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) { |
1031 | if (!vport->enabled) | |
1032 | continue; | |
1033 | mlx5_eswitch_unload_vport(esw, vport->vport); | |
1034 | } | |
23bb50cf BW |
1035 | } |
1036 | ||
1037 | int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs, | |
1038 | enum mlx5_eswitch_vport_event enabled_events) | |
1039 | { | |
47dd7e60 PP |
1040 | struct mlx5_vport *vport; |
1041 | unsigned long i; | |
23bb50cf | 1042 | int err; |
23bb50cf | 1043 | |
47dd7e60 PP |
1044 | mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) { |
1045 | err = mlx5_eswitch_load_vport(esw, vport->vport, enabled_events); | |
23bb50cf BW |
1046 | if (err) |
1047 | goto vf_err; | |
1048 | } | |
1049 | ||
1050 | return 0; | |
1051 | ||
1052 | vf_err: | |
47dd7e60 | 1053 | mlx5_eswitch_unload_vf_vports(esw, num_vfs); |
23bb50cf BW |
1054 | return err; |
1055 | } | |
1056 | ||
5bef709d PP |
/* On an embedded-CPU NIC, let the external host PF driver proceed with its
 * load. No-op on non-ECPF devices.
 */
static int host_pf_enable_hca(struct mlx5_core_dev *dev)
{
	if (!mlx5_core_is_ecpf(dev))
		return 0;

	/* Once vport and representor are ready, take out the external host PF
	 * out of initializing state. Enabling HCA clears the iser->initializing
	 * bit and host PF driver loading can progress.
	 */
	return mlx5_cmd_host_pf_enable_hca(dev);
}
1068 | ||
/* Counterpart of host_pf_enable_hca(): put the external host PF HCA back
 * into the disabled state. No-op on non-ECPF devices.
 */
static void host_pf_disable_hca(struct mlx5_core_dev *dev)
{
	if (mlx5_core_is_ecpf(dev))
		mlx5_cmd_host_pf_disable_hca(dev);
}
1076 | ||
5019833d PP |
/* mlx5_eswitch_enable_pf_vf_vports() enables vports of PF, ECPF and VFs
 * whichever are present on the eswitch.
 *
 * Enable order is PF -> host PF HCA -> ECPF -> VFs; the error unwind
 * releases them in exact reverse order.
 */
int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
				 enum mlx5_eswitch_vport_event enabled_events)
{
	int ret;

	/* Enable PF vport */
	ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_PF, enabled_events);
	if (ret)
		return ret;

	/* Enable external host PF HCA */
	ret = host_pf_enable_hca(esw->dev);
	if (ret)
		goto pf_hca_err;

	/* Enable ECPF vport */
	if (mlx5_ecpf_vport_exists(esw->dev)) {
		ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_ECPF, enabled_events);
		if (ret)
			goto ecpf_err;
	}

	/* Enable VF vports */
	ret = mlx5_eswitch_load_vf_vports(esw, esw->esw_funcs.num_vfs,
					  enabled_events);
	if (ret)
		goto vf_err;
	return 0;

vf_err:
	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);
ecpf_err:
	host_pf_disable_hca(esw->dev);
pf_hca_err:
	mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
	return ret;
}
1119 | ||
/* mlx5_eswitch_disable_pf_vf_vports() disables vports of PF, ECPF and VFs
 * whichever are previously enabled on the eswitch.
 *
 * Teardown order mirrors mlx5_eswitch_enable_pf_vf_vports() in reverse:
 * VFs -> ECPF -> host PF HCA -> PF.
 */
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
{
	mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);

	host_pf_disable_hca(esw->dev);
	mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
}
1133 | ||
87dac697 JL |
1134 | static void mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw) |
1135 | { | |
1136 | struct devlink *devlink = priv_to_devlink(esw->dev); | |
1137 | union devlink_param_value val; | |
1138 | int err; | |
1139 | ||
1140 | err = devlink_param_driverinit_value_get(devlink, | |
1141 | MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM, | |
1142 | &val); | |
1143 | if (!err) { | |
1144 | esw->params.large_group_num = val.vu32; | |
1145 | } else { | |
1146 | esw_warn(esw->dev, | |
1147 | "Devlink can't get param fdb_large_groups, uses default (%d).\n", | |
1148 | ESW_OFFLOADS_DEFAULT_NUM_GROUPS); | |
1149 | esw->params.large_group_num = ESW_OFFLOADS_DEFAULT_NUM_GROUPS; | |
1150 | } | |
1151 | } | |
1152 | ||
ebf77bb8 PP |
/* Refresh esw->esw_funcs.num_vfs. A negative @num_vfs means "don't change".
 * On an ECPF eswitch manager the authoritative VF count lives on the host
 * side, so it is queried from FW instead of trusting the caller's value.
 * Must only run while the eswitch is not enabled (checked by WARN_ON_ONCE).
 */
static void
mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, int num_vfs)
{
	const u32 *out;

	WARN_ON_ONCE(esw->mode != MLX5_ESWITCH_NONE);

	if (num_vfs < 0)
		return;

	if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		esw->esw_funcs.num_vfs = num_vfs;
		return;
	}

	out = mlx5_esw_query_functions(esw->dev);
	if (IS_ERR(out))
		return;

	esw->esw_funcs.num_vfs = MLX5_GET(query_esw_functions_out, out,
					  host_params_context.host_num_of_vfs);
	kvfree(out);
}
1176 | ||
8f010541 PP |
1177 | static void mlx5_esw_mode_change_notify(struct mlx5_eswitch *esw, u16 mode) |
1178 | { | |
1179 | struct mlx5_esw_event_info info = {}; | |
1180 | ||
1181 | info.new_mode = mode; | |
1182 | ||
1183 | blocking_notifier_call_chain(&esw->n_head, 0, &info); | |
1184 | } | |
1185 | ||
57b92bdd PP |
1186 | static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw) |
1187 | { | |
1188 | struct mlx5_core_dev *dev = esw->dev; | |
1189 | int total_vports; | |
1190 | int err; | |
1191 | ||
1192 | total_vports = mlx5_eswitch_get_total_vports(dev); | |
1193 | ||
1194 | if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) { | |
1195 | err = mlx5_fs_egress_acls_init(dev, total_vports); | |
1196 | if (err) | |
1197 | return err; | |
1198 | } else { | |
1199 | esw_warn(dev, "engress ACL is not supported by FW\n"); | |
1200 | } | |
1201 | ||
1202 | if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) { | |
1203 | err = mlx5_fs_ingress_acls_init(dev, total_vports); | |
1204 | if (err) | |
1205 | goto err; | |
1206 | } else { | |
1207 | esw_warn(dev, "ingress ACL is not supported by FW\n"); | |
1208 | } | |
1209 | return 0; | |
1210 | ||
1211 | err: | |
1212 | if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) | |
1213 | mlx5_fs_egress_acls_cleanup(dev); | |
1214 | return err; | |
1215 | } | |
1216 | ||
/* Tear down the ACL namespaces created by mlx5_esw_acls_ns_init(); each
 * direction is cleaned up only if its FW capability (and thus its init)
 * was present.
 */
static void mlx5_esw_acls_ns_cleanup(struct mlx5_eswitch *esw)
{
	struct mlx5_core_dev *dev = esw->dev;

	if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
		mlx5_fs_ingress_acls_cleanup(dev);
	if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
		mlx5_fs_egress_acls_cleanup(dev);
}
1226 | ||
ebf77bb8 | 1227 | /** |
8e0aa4bc | 1228 | * mlx5_eswitch_enable_locked - Enable eswitch |
ebf77bb8 PP |
1229 | * @esw: Pointer to eswitch |
1230 | * @mode: Eswitch mode to enable | |
1231 | * @num_vfs: Enable eswitch for given number of VFs. This is optional. | |
1232 | * Valid value are 0, > 0 and MLX5_ESWITCH_IGNORE_NUM_VFS. | |
1233 | * Caller should pass num_vfs > 0 when enabling eswitch for | |
1234 | * vf vports. Caller should pass num_vfs = 0, when eswitch | |
1235 | * is enabled without sriov VFs or when caller | |
1236 | * is unaware of the sriov state of the host PF on ECPF based | |
1237 | * eswitch. Caller should pass < 0 when num_vfs should be | |
1238 | * completely ignored. This is typically the case when eswitch | |
1239 | * is enabled without sriov regardless of PF/ECPF system. | |
8e0aa4bc PP |
1240 | * mlx5_eswitch_enable_locked() Enables eswitch in either legacy or offloads |
1241 | * mode. If num_vfs >=0 is provided, it setup VF related eswitch vports. | |
1242 | * It returns 0 on success or error code on failure. | |
ebf77bb8 | 1243 | */ |
8e0aa4bc | 1244 | int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs) |
5019833d | 1245 | { |
81848731 | 1246 | int err; |
81848731 | 1247 | |
8e0aa4bc PP |
1248 | lockdep_assert_held(&esw->mode_lock); |
1249 | ||
1250 | if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) { | |
f6455de0 | 1251 | esw_warn(esw->dev, "FDB is not supported, aborting ...\n"); |
9eb78923 | 1252 | return -EOPNOTSUPP; |
81848731 SM |
1253 | } |
1254 | ||
87dac697 JL |
1255 | mlx5_eswitch_get_devlink_param(esw); |
1256 | ||
57b92bdd PP |
1257 | err = mlx5_esw_acls_ns_init(esw); |
1258 | if (err) | |
1259 | return err; | |
1260 | ||
ebf77bb8 PP |
1261 | mlx5_eswitch_update_num_of_vfs(esw, num_vfs); |
1262 | ||
2d116e3e | 1263 | mlx5_esw_qos_create(esw); |
610090eb | 1264 | |
6ab36e35 | 1265 | esw->mode = mode; |
81848731 | 1266 | |
f6455de0 | 1267 | if (mode == MLX5_ESWITCH_LEGACY) { |
5896b972 | 1268 | err = esw_legacy_enable(esw); |
c5447c70 | 1269 | } else { |
912cebf4 | 1270 | mlx5_rescan_drivers(esw->dev); |
5896b972 | 1271 | err = esw_offloads_enable(esw); |
c5447c70 MB |
1272 | } |
1273 | ||
81848731 SM |
1274 | if (err) |
1275 | goto abort; | |
1276 | ||
16fff98a | 1277 | mlx5_eswitch_event_handlers_register(esw); |
6933a937 | 1278 | |
f6455de0 BW |
1279 | esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n", |
1280 | mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS", | |
062f4bf4 | 1281 | esw->esw_funcs.num_vfs, esw->enabled_vports); |
6933a937 | 1282 | |
8f010541 PP |
1283 | mlx5_esw_mode_change_notify(esw, mode); |
1284 | ||
81848731 SM |
1285 | return 0; |
1286 | ||
1287 | abort: | |
f6455de0 | 1288 | esw->mode = MLX5_ESWITCH_NONE; |
c5447c70 | 1289 | |
93f82444 | 1290 | if (mode == MLX5_ESWITCH_OFFLOADS) |
912cebf4 | 1291 | mlx5_rescan_drivers(esw->dev); |
93f82444 | 1292 | |
2d116e3e | 1293 | mlx5_esw_qos_destroy(esw); |
57b92bdd | 1294 | mlx5_esw_acls_ns_cleanup(esw); |
81848731 SM |
1295 | return err; |
1296 | } | |
1297 | ||
8e0aa4bc PP |
1298 | /** |
1299 | * mlx5_eswitch_enable - Enable eswitch | |
1300 | * @esw: Pointer to eswitch | |
39c538d6 | 1301 | * @num_vfs: Enable eswitch switch for given number of VFs. |
8e0aa4bc PP |
1302 | * Caller must pass num_vfs > 0 when enabling eswitch for |
1303 | * vf vports. | |
1304 | * mlx5_eswitch_enable() returns 0 on success or error code on failure. | |
1305 | */ | |
1306 | int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) | |
1307 | { | |
1308 | int ret; | |
1309 | ||
b16f2bb6 | 1310 | if (!mlx5_esw_allowed(esw)) |
8e0aa4bc PP |
1311 | return 0; |
1312 | ||
cac1eb2c | 1313 | mlx5_lag_disable_change(esw->dev); |
c55479d0 | 1314 | down_write(&esw->mode_lock); |
ea2128fd PP |
1315 | if (esw->mode == MLX5_ESWITCH_NONE) { |
1316 | ret = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, num_vfs); | |
1317 | } else { | |
1318 | enum mlx5_eswitch_vport_event vport_events; | |
1319 | ||
1320 | vport_events = (esw->mode == MLX5_ESWITCH_LEGACY) ? | |
1321 | MLX5_LEGACY_SRIOV_VPORT_EVENTS : MLX5_VPORT_UC_ADDR_CHANGE; | |
1322 | ret = mlx5_eswitch_load_vf_vports(esw, num_vfs, vport_events); | |
1323 | if (!ret) | |
1324 | esw->esw_funcs.num_vfs = num_vfs; | |
1325 | } | |
c55479d0 | 1326 | up_write(&esw->mode_lock); |
cac1eb2c | 1327 | mlx5_lag_enable_change(esw->dev); |
8e0aa4bc PP |
1328 | return ret; |
1329 | } | |
1330 | ||
/* Disable the eswitch: notify listeners, unregister event handlers, run the
 * mode-specific teardown and release QoS/ACL resources.
 * @clear_vf: also reset software VF vport config (it is otherwise preserved
 *            so it can be restored on re-enable).
 * Caller must hold esw->mode_lock for write.
 */
void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
{
	int old_mode;

	lockdep_assert_held_write(&esw->mode_lock);

	if (esw->mode == MLX5_ESWITCH_NONE)
		return;

	esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n",
		 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
		 esw->esw_funcs.num_vfs, esw->enabled_vports);

	/* Notify eswitch users that it is exiting from current mode.
	 * So that it can do necessary cleanup before the eswitch is disabled.
	 */
	mlx5_esw_mode_change_notify(esw, MLX5_ESWITCH_NONE);

	mlx5_eswitch_event_handlers_unregister(esw);

	if (esw->mode == MLX5_ESWITCH_LEGACY)
		esw_legacy_disable(esw);
	else if (esw->mode == MLX5_ESWITCH_OFFLOADS)
		esw_offloads_disable(esw);

	old_mode = esw->mode;
	esw->mode = MLX5_ESWITCH_NONE;

	/* Leaving offloads mode: re-probe auxiliary drivers. */
	if (old_mode == MLX5_ESWITCH_OFFLOADS)
		mlx5_rescan_drivers(esw->dev);

	mlx5_esw_qos_destroy(esw);
	mlx5_esw_acls_ns_cleanup(esw);

	if (clear_vf)
		mlx5_eswitch_clear_vf_vports_info(esw);
}
1368 | ||
8e0aa4bc PP |
/* Locked wrapper around mlx5_eswitch_disable_locked(); also resets the
 * cached VF count and pauses LAG changes for the duration.
 */
void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf)
{
	if (!mlx5_esw_allowed(esw))
		return;

	mlx5_lag_disable_change(esw->dev);
	down_write(&esw->mode_lock);
	mlx5_eswitch_disable_locked(esw, clear_vf);
	esw->esw_funcs.num_vfs = 0;
	up_write(&esw->mode_lock);
	mlx5_lag_enable_change(esw->dev);
}
1381 | ||
87bd418e PP |
/* Query the external host PF's general HCA capabilities (max values) into
 * @out via QUERY_HCA_CAP with other_function set.
 */
static int mlx5_query_hca_cap_host_pf(struct mlx5_core_dev *dev, void *out)
{
	u16 opmod = (MLX5_CAP_GENERAL << 1) | (HCA_CAP_OPMOD_GET_MAX & 0x01);
	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)] = {};

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
	MLX5_SET(query_hca_cap_in, in, function_id, MLX5_VPORT_PF);
	MLX5_SET(query_hca_cap_in, in, other_function, true);
	return mlx5_cmd_exec_inout(dev, query_hca_cap, in, out);
}
1393 | ||
/* Report the external host PF's SF limits: max SF count and the base SF
 * function id. On non-ECPF devices there are no host-PF SFs, so *max_sfs is
 * 0 (and *sf_base_id is left untouched). Returns 0 or a negative errno.
 */
int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id)
{
	int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *query_ctx;
	void *hca_caps;
	int err;

	if (!mlx5_core_is_ecpf(dev)) {
		*max_sfs = 0;
		return 0;
	}

	query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
	if (!query_ctx)
		return -ENOMEM;

	err = mlx5_query_hca_cap_host_pf(dev, query_ctx);
	if (err)
		goto out_free;

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
	*max_sfs = MLX5_GET(cmd_hca_cap, hca_caps, max_num_sf);
	*sf_base_id = MLX5_GET(cmd_hca_cap, hca_caps, sf_base_id);

out_free:
	kfree(query_ctx);
	return err;
}
1423 | ||
47dd7e60 PP |
/* Allocate one mlx5_vport, initialize its defaults and insert it into the
 * esw->vports xarray keyed by @vport_num; bumps esw->total_vports.
 * NOTE(review): the @dev parameter is unused here — the body reads esw->dev
 * instead; presumably kept for signature symmetry. Confirm before removing.
 */
static int mlx5_esw_vport_alloc(struct mlx5_eswitch *esw, struct mlx5_core_dev *dev,
				int index, u16 vport_num)
{
	struct mlx5_vport *vport;
	int err;

	vport = kzalloc(sizeof(*vport), GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	vport->dev = esw->dev;
	vport->vport = vport_num;
	vport->index = index;
	vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
	INIT_WORK(&vport->vport_change_handler, esw_vport_change_handler);
	err = xa_insert(&esw->vports, vport_num, vport, GFP_KERNEL);
	if (err)
		goto insert_err;

	esw->total_vports++;
	return 0;

insert_err:
	kfree(vport);
	return err;
}
1450 | ||
/* Remove a vport from the esw->vports xarray and free it. */
static void mlx5_esw_vport_free(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	xa_erase(&esw->vports, vport->vport);
	kfree(vport);
}
1456 | ||
/* Free every allocated vport and destroy the esw->vports xarray itself. */
static void mlx5_esw_vports_cleanup(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;

	mlx5_esw_for_each_vport(esw, i, vport)
		mlx5_esw_vport_free(esw, vport);
	xa_destroy(&esw->vports);
}
1466 | ||
/* Populate the esw->vports xarray for every function the eswitch manages:
 * PF, VFs, local SFs, host-PF SFs, optional ECPF, and finally the uplink.
 * Each entry is marked (HOST_FN / VF / SF) for later filtered iteration.
 * Note: @idx is the sequential vport index while the xarray key is the
 * actual vport number (SFs are keyed by base_sf_num + i, not idx).
 * On any failure all previously allocated vports are freed.
 */
static int mlx5_esw_vports_init(struct mlx5_eswitch *esw)
{
	struct mlx5_core_dev *dev = esw->dev;
	u16 max_host_pf_sfs;
	u16 base_sf_num;
	int idx = 0;
	int err;
	int i;

	xa_init(&esw->vports);

	err = mlx5_esw_vport_alloc(esw, dev, idx, MLX5_VPORT_PF);
	if (err)
		goto err;
	if (esw->first_host_vport == MLX5_VPORT_PF)
		xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
	idx++;

	for (i = 0; i < mlx5_core_max_vfs(dev); i++) {
		err = mlx5_esw_vport_alloc(esw, dev, idx, idx);
		if (err)
			goto err;
		xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_VF);
		xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
		idx++;
	}

	/* Locally owned SFs. */
	base_sf_num = mlx5_sf_start_function_id(dev);
	for (i = 0; i < mlx5_sf_max_functions(dev); i++) {
		err = mlx5_esw_vport_alloc(esw, dev, idx, base_sf_num + i);
		if (err)
			goto err;
		xa_set_mark(&esw->vports, base_sf_num + i, MLX5_ESW_VPT_SF);
		idx++;
	}

	/* SFs owned by the external host PF (ECPF systems). */
	err = mlx5_esw_sf_max_hpf_functions(dev, &max_host_pf_sfs, &base_sf_num);
	if (err)
		goto err;
	for (i = 0; i < max_host_pf_sfs; i++) {
		err = mlx5_esw_vport_alloc(esw, dev, idx, base_sf_num + i);
		if (err)
			goto err;
		xa_set_mark(&esw->vports, base_sf_num + i, MLX5_ESW_VPT_SF);
		idx++;
	}

	if (mlx5_ecpf_vport_exists(dev)) {
		err = mlx5_esw_vport_alloc(esw, dev, idx, MLX5_VPORT_ECPF);
		if (err)
			goto err;
		idx++;
	}
	err = mlx5_esw_vport_alloc(esw, dev, idx, MLX5_VPORT_UPLINK);
	if (err)
		goto err;
	return 0;

err:
	mlx5_esw_vports_cleanup(esw);
	return err;
}
1528 | ||
073bb189 SM |
/* Allocate and initialize the eswitch instance for @dev: workqueue, vport
 * table, representors, offloads tables, locks and defaults. Publishes the
 * eswitch via dev->priv.eswitch. No-op (returns 0) when the device is not
 * a vport manager. Returns 0 or a negative errno.
 */
int mlx5_eswitch_init(struct mlx5_core_dev *dev)
{
	struct mlx5_eswitch *esw;
	int err;

	if (!MLX5_VPORT_MANAGER(dev))
		return 0;

	esw = kzalloc(sizeof(*esw), GFP_KERNEL);
	if (!esw)
		return -ENOMEM;

	esw->dev = dev;
	esw->manager_vport = mlx5_eswitch_manager_vport(dev);
	esw->first_host_vport = mlx5_eswitch_first_host_vport_num(dev);

	/* Single-threaded: vport change events are serialized by design. */
	esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
	if (!esw->work_queue) {
		err = -ENOMEM;
		goto abort;
	}

	err = mlx5_esw_vports_init(esw);
	if (err)
		goto abort;

	err = esw_offloads_init_reps(esw);
	if (err)
		goto reps_err;

	mutex_init(&esw->offloads.encap_tbl_lock);
	hash_init(esw->offloads.encap_tbl);
	mutex_init(&esw->offloads.decap_tbl_lock);
	hash_init(esw->offloads.decap_tbl);
	mlx5e_mod_hdr_tbl_init(&esw->offloads.mod_hdr);
	atomic64_set(&esw->offloads.num_flows, 0);
	ida_init(&esw->offloads.vport_metadata_ida);
	xa_init_flags(&esw->offloads.vhca_map, XA_FLAGS_ALLOC);
	mutex_init(&esw->state_lock);
	/* Dedicated lockdep key so mode_lock nesting is tracked per-eswitch. */
	lockdep_register_key(&esw->mode_lock_key);
	init_rwsem(&esw->mode_lock);
	lockdep_set_class(&esw->mode_lock, &esw->mode_lock_key);

	esw->enabled_vports = 0;
	esw->mode = MLX5_ESWITCH_NONE;
	esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
	/* Default encap mode depends on FW reformat+decap support. */
	if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) &&
	    MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
	else
		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;

	dev->priv.eswitch = esw;
	BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head);

	esw_info(dev,
		 "Total vports %d, per vport: max uc(%d) max mc(%d)\n",
		 esw->total_vports,
		 MLX5_MAX_UC_PER_VPORT(dev),
		 MLX5_MAX_MC_PER_VPORT(dev));
	return 0;

reps_err:
	mlx5_esw_vports_cleanup(esw);
abort:
	if (esw->work_queue)
		destroy_workqueue(esw->work_queue);
	kfree(esw);
	return err;
}
1599 | ||
/* Tear down everything created by mlx5_eswitch_init(), in reverse order,
 * and free the eswitch. Safe to call with a NULL or non-manager eswitch.
 */
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
{
	if (!esw || !MLX5_VPORT_MANAGER(esw->dev))
		return;

	esw_info(esw->dev, "cleanup\n");

	esw->dev->priv.eswitch = NULL;
	destroy_workqueue(esw->work_queue);
	lockdep_unregister_key(&esw->mode_lock_key);
	mutex_destroy(&esw->state_lock);
	/* All vhca-id mappings must have been cleared by vport disable. */
	WARN_ON(!xa_empty(&esw->offloads.vhca_map));
	xa_destroy(&esw->offloads.vhca_map);
	ida_destroy(&esw->offloads.vport_metadata_ida);
	mlx5e_mod_hdr_tbl_destroy(&esw->offloads.mod_hdr);
	mutex_destroy(&esw->offloads.encap_tbl_lock);
	mutex_destroy(&esw->offloads.decap_tbl_lock);
	esw_offloads_cleanup_reps(esw);
	mlx5_esw_vports_cleanup(esw);
	kfree(esw);
}
1621 | ||
77256579 | 1622 | /* Vport Administration */ |
1094795c PP |
/* Vport Administration */
/* Set a vport's MAC (and the node GUID derived from it) in HW, then cache
 * both in evport->info. In legacy mode on an enabled vport, the ingress ACL
 * is rebuilt so anti-spoof rules match the new MAC.
 * Caller must hold esw->state_lock. Returns 0 or a negative errno; a node
 * GUID update failure is reported via the return value but does not undo
 * the MAC change.
 */
static int
mlx5_esw_set_vport_mac_locked(struct mlx5_eswitch *esw,
			      struct mlx5_vport *evport, const u8 *mac)
{
	u16 vport_num = evport->vport;
	u64 node_guid;
	int err = 0;

	if (is_multicast_ether_addr(mac))
		return -EINVAL;

	/* Allowed but suspicious: warn, don't fail. */
	if (evport->info.spoofchk && !is_valid_ether_addr(mac))
		mlx5_core_warn(esw->dev,
			       "Set invalid MAC while spoofchk is on, vport(%d)\n",
			       vport_num);

	err = mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, mac);
	if (err) {
		mlx5_core_warn(esw->dev,
			       "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n",
			       vport_num, err);
		return err;
	}

	node_guid_gen_from_mac(&node_guid, mac);
	err = mlx5_modify_nic_vport_node_guid(esw->dev, vport_num, node_guid);
	if (err)
		mlx5_core_warn(esw->dev,
			       "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
			       vport_num, err);

	ether_addr_copy(evport->info.mac, mac);
	evport->info.node_guid = node_guid;
	if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
		err = esw_acl_ingress_lgcy_setup(esw, evport);

	return err;
}
1661 | ||
1662 | int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, | |
1663 | u16 vport, const u8 *mac) | |
1664 | { | |
1665 | struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); | |
1666 | int err = 0; | |
1667 | ||
1668 | if (IS_ERR(evport)) | |
1669 | return PTR_ERR(evport); | |
1670 | ||
1671 | mutex_lock(&esw->state_lock); | |
1672 | err = mlx5_esw_set_vport_mac_locked(esw, evport, mac); | |
f942380c | 1673 | mutex_unlock(&esw->state_lock); |
77256579 SM |
1674 | return err; |
1675 | } | |
1676 | ||
47dd7e60 PP |
1677 | static bool mlx5_esw_check_port_type(struct mlx5_eswitch *esw, u16 vport_num, xa_mark_t mark) |
1678 | { | |
1679 | struct mlx5_vport *vport; | |
1680 | ||
1681 | vport = mlx5_eswitch_get_vport(esw, vport_num); | |
1682 | if (IS_ERR(vport)) | |
1683 | return false; | |
1684 | ||
1685 | return xa_get_mark(&esw->vports, vport_num, mark); | |
1686 | } | |
1687 | ||
/* True if @vport_num refers to a VF vport on this eswitch. */
bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_VF);
}
1692 | ||
/* True if @vport_num refers to an SF vport on this eswitch. */
bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_SF);
}
1697 | ||
f099fde1 | 1698 | static bool |
47dd7e60 | 1699 | is_port_function_supported(struct mlx5_eswitch *esw, u16 vport_num) |
f099fde1 PP |
1700 | { |
1701 | return vport_num == MLX5_VPORT_PF || | |
d7f33a45 VP |
1702 | mlx5_eswitch_is_vf_vport(esw, vport_num) || |
1703 | mlx5_esw_is_sf_vport(esw, vport_num); | |
f099fde1 PP |
1704 | } |
1705 | ||
82564f6c | 1706 | int mlx5_devlink_port_function_hw_addr_get(struct devlink_port *port, |
f099fde1 PP |
1707 | u8 *hw_addr, int *hw_addr_len, |
1708 | struct netlink_ext_ack *extack) | |
1709 | { | |
1710 | struct mlx5_eswitch *esw; | |
1711 | struct mlx5_vport *vport; | |
1712 | int err = -EOPNOTSUPP; | |
1713 | u16 vport_num; | |
1714 | ||
82564f6c | 1715 | esw = mlx5_devlink_eswitch_get(port->devlink); |
f099fde1 PP |
1716 | if (IS_ERR(esw)) |
1717 | return PTR_ERR(esw); | |
1718 | ||
1719 | vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index); | |
1720 | if (!is_port_function_supported(esw, vport_num)) | |
1721 | return -EOPNOTSUPP; | |
1722 | ||
1723 | vport = mlx5_eswitch_get_vport(esw, vport_num); | |
1724 | if (IS_ERR(vport)) { | |
1725 | NL_SET_ERR_MSG_MOD(extack, "Invalid port"); | |
1726 | return PTR_ERR(vport); | |
1727 | } | |
1728 | ||
1729 | mutex_lock(&esw->state_lock); | |
1730 | if (vport->enabled) { | |
1731 | ether_addr_copy(hw_addr, vport->info.mac); | |
1732 | *hw_addr_len = ETH_ALEN; | |
1733 | err = 0; | |
f099fde1 PP |
1734 | } |
1735 | mutex_unlock(&esw->state_lock); | |
1736 | return err; | |
1737 | } | |
1738 | ||
82564f6c | 1739 | int mlx5_devlink_port_function_hw_addr_set(struct devlink_port *port, |
330077d1 PP |
1740 | const u8 *hw_addr, int hw_addr_len, |
1741 | struct netlink_ext_ack *extack) | |
1742 | { | |
1743 | struct mlx5_eswitch *esw; | |
1744 | struct mlx5_vport *vport; | |
1745 | int err = -EOPNOTSUPP; | |
1746 | u16 vport_num; | |
1747 | ||
82564f6c | 1748 | esw = mlx5_devlink_eswitch_get(port->devlink); |
330077d1 PP |
1749 | if (IS_ERR(esw)) { |
1750 | NL_SET_ERR_MSG_MOD(extack, "Eswitch doesn't support set hw_addr"); | |
1751 | return PTR_ERR(esw); | |
1752 | } | |
1753 | ||
1754 | vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index); | |
1755 | if (!is_port_function_supported(esw, vport_num)) { | |
1756 | NL_SET_ERR_MSG_MOD(extack, "Port doesn't support set hw_addr"); | |
1757 | return -EINVAL; | |
1758 | } | |
1759 | vport = mlx5_eswitch_get_vport(esw, vport_num); | |
1760 | if (IS_ERR(vport)) { | |
1761 | NL_SET_ERR_MSG_MOD(extack, "Invalid port"); | |
1762 | return PTR_ERR(vport); | |
1763 | } | |
1764 | ||
1765 | mutex_lock(&esw->state_lock); | |
1766 | if (vport->enabled) | |
1767 | err = mlx5_esw_set_vport_mac_locked(esw, vport, hw_addr); | |
1768 | else | |
1769 | NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled"); | |
1770 | mutex_unlock(&esw->state_lock); | |
1771 | return err; | |
1772 | } | |
1773 | ||
/* Set the administrative link state of a vport. Only supported while the
 * eswitch is in legacy mode; the cached link state is updated on success.
 */
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
				 u16 vport, int link_state)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
	int opmod = MLX5_VPORT_STATE_OP_MOD_ESW_VPORT;
	int other_vport = 1;
	int err = 0;

	if (!mlx5_esw_allowed(esw))
		return -EPERM;
	if (IS_ERR(evport))
		return PTR_ERR(evport);

	/* The uplink is addressed with its own opmod and vport number 0,
	 * without the other_vport indirection.
	 */
	if (vport == MLX5_VPORT_UPLINK) {
		opmod = MLX5_VPORT_STATE_OP_MOD_UPLINK;
		other_vport = 0;
		vport = 0;
	}
	mutex_lock(&esw->state_lock);
	/* Mode must be checked under the lock so it can't change mid-update. */
	if (esw->mode != MLX5_ESWITCH_LEGACY) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	err = mlx5_modify_vport_admin_state(esw->dev, opmod, vport, other_vport, link_state);
	if (err) {
		mlx5_core_warn(esw->dev, "Failed to set vport %d link state, opmod = %d, err = %d",
			       vport, opmod, err);
		goto unlock;
	}

	evport->info.link_state = link_state;

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}
1811 | ||
1812 | int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, | |
02f3afd9 | 1813 | u16 vport, struct ifla_vf_info *ivi) |
77256579 | 1814 | { |
5d9986a3 | 1815 | struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); |
9e7ea352 | 1816 | |
5d9986a3 BW |
1817 | if (IS_ERR(evport)) |
1818 | return PTR_ERR(evport); | |
f942380c | 1819 | |
77256579 SM |
1820 | memset(ivi, 0, sizeof(*ivi)); |
1821 | ivi->vf = vport - 1; | |
1822 | ||
1ab2068a MHY |
1823 | mutex_lock(&esw->state_lock); |
1824 | ether_addr_copy(ivi->mac, evport->info.mac); | |
1825 | ivi->linkstate = evport->info.link_state; | |
1826 | ivi->vlan = evport->info.vlan; | |
1827 | ivi->qos = evport->info.qos; | |
1828 | ivi->spoofchk = evport->info.spoofchk; | |
1829 | ivi->trusted = evport->info.trusted; | |
e591605f PP |
1830 | ivi->min_tx_rate = evport->qos.min_rate; |
1831 | ivi->max_tx_rate = evport->qos.max_rate; | |
1ab2068a | 1832 | mutex_unlock(&esw->state_lock); |
77256579 SM |
1833 | |
1834 | return 0; | |
1835 | } | |
9e7ea352 | 1836 | |
/* Program the vport's cvlan (VLAN id 0-4095, QoS/PCP 0-7) according to
 * @set_flags, cache the values, and in legacy mode refresh both the
 * ingress and egress ACLs which enforce the VLAN.
 */
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				  u16 vport, u16 vlan, u8 qos, u8 set_flags)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
	int err = 0;

	if (IS_ERR(evport))
		return PTR_ERR(evport);
	if (vlan > 4095 || qos > 7)
		return -EINVAL;

	err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
	if (err)
		return err;

	evport->info.vlan = vlan;
	evport->info.qos = qos;
	/* Ingress must be set up before egress; bail out on the first error. */
	if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) {
		err = esw_acl_ingress_lgcy_setup(esw, evport);
		if (err)
			return err;
		err = esw_acl_egress_lgcy_setup(esw, evport);
	}

	return err;
}
3b751a2a SM |
1863 | |
/* Query firmware vport counters and drop statistics for @vport_num and
 * aggregate them into @vf_stats. RX/TX packet and byte totals sum the
 * eth and ib unicast/multicast/broadcast counters.
 */
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
				 u16 vport_num,
				 struct ifla_vf_stats *vf_stats)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {};
	struct mlx5_vport_drop_stats stats = {};
	int err = 0;
	u32 *out;

	if (IS_ERR(vport))
		return PTR_ERR(vport);

	/* Output layout is large; allocate it rather than using the stack. */
	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
	MLX5_SET(query_vport_counter_in, in, vport_number, vport->vport);
	/* Query on behalf of another vport, not the issuing function. */
	MLX5_SET(query_vport_counter_in, in, other_vport, 1);

	err = mlx5_cmd_exec_inout(esw->dev, query_vport_counter, in, out);
	if (err)
		goto free_out;

/* Shorthand for extracting one 64-bit counter from the reply. */
#define MLX5_GET_CTR(p, x) \
	MLX5_GET64(query_vport_counter_out, p, x)

	memset(vf_stats, 0, sizeof(*vf_stats));
	vf_stats->rx_packets =
		MLX5_GET_CTR(out, received_eth_unicast.packets) +
		MLX5_GET_CTR(out, received_ib_unicast.packets) +
		MLX5_GET_CTR(out, received_eth_multicast.packets) +
		MLX5_GET_CTR(out, received_ib_multicast.packets) +
		MLX5_GET_CTR(out, received_eth_broadcast.packets);

	vf_stats->rx_bytes =
		MLX5_GET_CTR(out, received_eth_unicast.octets) +
		MLX5_GET_CTR(out, received_ib_unicast.octets) +
		MLX5_GET_CTR(out, received_eth_multicast.octets) +
		MLX5_GET_CTR(out, received_ib_multicast.octets) +
		MLX5_GET_CTR(out, received_eth_broadcast.octets);

	vf_stats->tx_packets =
		MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
		MLX5_GET_CTR(out, transmitted_ib_unicast.packets) +
		MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
		MLX5_GET_CTR(out, transmitted_ib_multicast.packets) +
		MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);

	vf_stats->tx_bytes =
		MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
		MLX5_GET_CTR(out, transmitted_ib_unicast.octets) +
		MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
		MLX5_GET_CTR(out, transmitted_ib_multicast.octets) +
		MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);

	vf_stats->multicast =
		MLX5_GET_CTR(out, received_eth_multicast.packets) +
		MLX5_GET_CTR(out, received_ib_multicast.packets);

	vf_stats->broadcast =
		MLX5_GET_CTR(out, received_eth_broadcast.packets);

	/* Drop counters come from a separate query. */
	err = mlx5_esw_query_vport_drop_stats(esw->dev, vport, &stats);
	if (err)
		goto free_out;
	vf_stats->rx_dropped = stats.rx_dropped;
	vf_stats->tx_dropped = stats.tx_dropped;

free_out:
	kvfree(out);
	return err;
}
57cbd893 | 1941 | |
7d4fd44e | 1942 | u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev) |
57cbd893 | 1943 | { |
e8711402 LR |
1944 | struct mlx5_eswitch *esw = dev->priv.eswitch; |
1945 | ||
b16f2bb6 | 1946 | return mlx5_esw_allowed(esw) ? esw->mode : MLX5_ESWITCH_NONE; |
57cbd893 MB |
1947 | } |
1948 | EXPORT_SYMBOL_GPL(mlx5_eswitch_mode); | |
eff849b2 | 1949 | |
82b11f07 MG |
1950 | enum devlink_eswitch_encap_mode |
1951 | mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev) | |
1952 | { | |
1953 | struct mlx5_eswitch *esw; | |
1954 | ||
1955 | esw = dev->priv.eswitch; | |
7d4fd44e | 1956 | return (mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS) ? esw->offloads.encap : |
82b11f07 MG |
1957 | DEVLINK_ESWITCH_ENCAP_MODE_NONE; |
1958 | } | |
1959 | EXPORT_SYMBOL(mlx5_eswitch_get_encap_mode); | |
1960 | ||
eff849b2 RL |
1961 | bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) |
1962 | { | |
f6455de0 BW |
1963 | if ((dev0->priv.eswitch->mode == MLX5_ESWITCH_NONE && |
1964 | dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE) || | |
1965 | (dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS && | |
1966 | dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS)) | |
eff849b2 RL |
1967 | return true; |
1968 | ||
1969 | return false; | |
1970 | } | |
544fe7c2 RD |
1971 | |
1972 | bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0, | |
1973 | struct mlx5_core_dev *dev1) | |
1974 | { | |
f6455de0 BW |
1975 | return (dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS && |
1976 | dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS); | |
544fe7c2 | 1977 | } |
062f4bf4 | 1978 | |
/* Subscribe @nb to eswitch events delivered via esw->n_head. */
int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&esw->n_head, nb);
}
dd28087c | 1983 | |
/* Unsubscribe @nb from eswitch events. */
void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&esw->n_head, nb);
}
7dc84de9 RD |
1988 | |
1989 | /** | |
1990 | * mlx5_esw_hold() - Try to take a read lock on esw mode lock. | |
1991 | * @mdev: mlx5 core device. | |
1992 | * | |
1993 | * Should be called by esw resources callers. | |
1994 | * | |
1995 | * Return: true on success or false. | |
1996 | */ | |
1997 | bool mlx5_esw_hold(struct mlx5_core_dev *mdev) | |
1998 | { | |
1999 | struct mlx5_eswitch *esw = mdev->priv.eswitch; | |
2000 | ||
2001 | /* e.g. VF doesn't have eswitch so nothing to do */ | |
b16f2bb6 | 2002 | if (!mlx5_esw_allowed(esw)) |
7dc84de9 RD |
2003 | return true; |
2004 | ||
2005 | if (down_read_trylock(&esw->mode_lock) != 0) | |
2006 | return true; | |
2007 | ||
2008 | return false; | |
2009 | } | |
2010 | ||
2011 | /** | |
2012 | * mlx5_esw_release() - Release a read lock on esw mode lock. | |
2013 | * @mdev: mlx5 core device. | |
2014 | */ | |
2015 | void mlx5_esw_release(struct mlx5_core_dev *mdev) | |
2016 | { | |
2017 | struct mlx5_eswitch *esw = mdev->priv.eswitch; | |
2018 | ||
b16f2bb6 | 2019 | if (mlx5_esw_allowed(esw)) |
7dc84de9 RD |
2020 | up_read(&esw->mode_lock); |
2021 | } | |
2022 | ||
2023 | /** | |
2024 | * mlx5_esw_get() - Increase esw user count. | |
2025 | * @mdev: mlx5 core device. | |
2026 | */ | |
2027 | void mlx5_esw_get(struct mlx5_core_dev *mdev) | |
2028 | { | |
2029 | struct mlx5_eswitch *esw = mdev->priv.eswitch; | |
2030 | ||
b16f2bb6 | 2031 | if (mlx5_esw_allowed(esw)) |
7dc84de9 RD |
2032 | atomic64_inc(&esw->user_count); |
2033 | } | |
2034 | ||
2035 | /** | |
2036 | * mlx5_esw_put() - Decrease esw user count. | |
2037 | * @mdev: mlx5 core device. | |
2038 | */ | |
2039 | void mlx5_esw_put(struct mlx5_core_dev *mdev) | |
2040 | { | |
2041 | struct mlx5_eswitch *esw = mdev->priv.eswitch; | |
2042 | ||
b16f2bb6 | 2043 | if (mlx5_esw_allowed(esw)) |
7dc84de9 RD |
2044 | atomic64_dec_if_positive(&esw->user_count); |
2045 | } | |
2046 | ||
/**
 * mlx5_esw_try_lock() - Take a write lock on esw mode lock.
 * @esw: eswitch device.
 *
 * Should be called by esw mode change routine.
 *
 * Return:
 * * 0 - esw mode if successfully locked and refcount is 0.
 * * -EBUSY - refcount is not 0.
 * * -EINVAL - In the middle of switching mode or lock is already held.
 */
int mlx5_esw_try_lock(struct mlx5_eswitch *esw)
{
	if (down_write_trylock(&esw->mode_lock) == 0)
		return -EINVAL;

	/* Refcount is checked only after the write lock is held, so no new
	 * users can appear; drop the lock again if any are outstanding.
	 */
	if (atomic64_read(&esw->user_count) > 0) {
		up_write(&esw->mode_lock);
		return -EBUSY;
	}

	/* Lock stays held on success; caller releases via mlx5_esw_unlock(). */
	return esw->mode;
}
2070 | ||
/**
 * mlx5_esw_unlock() - Release write lock on esw mode lock
 * @esw: eswitch device.
 */
void mlx5_esw_unlock(struct mlx5_eswitch *esw)
{
	/* No-op where the eswitch isn't managed (e.g. VF), matching
	 * mlx5_esw_lock() which never took the lock in that case.
	 */
	if (!mlx5_esw_allowed(esw))
		return;
	up_write(&esw->mode_lock);
}
06ec5acc | 2081 | |
cac1eb2c MB |
/**
 * mlx5_esw_lock() - Take write lock on esw mode lock
 * @esw: eswitch device.
 */
void mlx5_esw_lock(struct mlx5_eswitch *esw)
{
	/* No-op where the eswitch isn't managed (e.g. VF). */
	if (!mlx5_esw_allowed(esw))
		return;
	down_write(&esw->mode_lock);
}
2092 | ||
06ec5acc PP |
2093 | /** |
2094 | * mlx5_eswitch_get_total_vports - Get total vports of the eswitch | |
2095 | * | |
2096 | * @dev: Pointer to core device | |
2097 | * | |
2098 | * mlx5_eswitch_get_total_vports returns total number of eswitch vports. | |
2099 | */ | |
2100 | u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev) | |
2101 | { | |
9f8c7100 PP |
2102 | struct mlx5_eswitch *esw; |
2103 | ||
2104 | esw = dev->priv.eswitch; | |
2105 | return mlx5_esw_allowed(esw) ? esw->total_vports : 0; | |
06ec5acc PP |
2106 | } |
2107 | EXPORT_SYMBOL_GPL(mlx5_eswitch_get_total_vports); | |
97a8a8c1 MB |
2108 | |
/**
 * mlx5_eswitch_get_core_dev - Get the mdev device
 * @esw : eswitch device.
 *
 * Return the mellanox core device which manages the eswitch.
 * Returns NULL when the eswitch isn't managed by this function.
 */
struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw)
{
	return mlx5_esw_allowed(esw) ? esw->dev : NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_core_dev);