/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/netdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "lag.h"
#include "lag_mp.h"

/* General purpose, use for short periods of time.
 * Beware of lock dependencies (preferably, no locks should be acquired
 * under it).
 */
static DEFINE_SPINLOCK(lag_lock);

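/* Firmware command wrappers: CREATE_LAG programs the initial TX port
 * affinity for both ports, MODIFY_LAG updates the affinity of an
 * already existing LAG object.
 */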
static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1,
			       u8 remap_port2)
{
	u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {};
	void *lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);

	MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);

	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);

	return mlx5_cmd_exec_in(dev, create_lag, in);
}

static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 remap_port1,
			       u8 remap_port2)
{
	u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {};
	void *lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx);

	MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG);
	MLX5_SET(modify_lag_in, in, field_select, 0x1);

	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);

	return mlx5_cmd_exec_in(dev, modify_lag, in);
}

int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(create_vport_lag_in)] = {};

	MLX5_SET(create_vport_lag_in, in, opcode, MLX5_CMD_OP_CREATE_VPORT_LAG);

	return mlx5_cmd_exec_in(dev, create_vport_lag, in);
}
EXPORT_SYMBOL(mlx5_cmd_create_vport_lag);

int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(destroy_vport_lag_in)] = {};

	MLX5_SET(destroy_vport_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_VPORT_LAG);

	return mlx5_cmd_exec_in(dev, destroy_vport_lag, in);
}
EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag);

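/* Return the index of @ndev in the lag's PF table, or -ENOENT if the
 * netdev does not belong to this lag device.
 */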
int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
				struct net_device *ndev)
{
	int i;

	for (i = 0; i < MLX5_MAX_PORTS; i++)
		if (ldev->pf[i].netdev == ndev)
			return i;

	return -ENOENT;
}

static bool __mlx5_lag_is_roce(struct mlx5_lag *ldev)
{
	return !!(ldev->flags & MLX5_LAG_FLAG_ROCE);
}

static bool __mlx5_lag_is_sriov(struct mlx5_lag *ldev)
{
	return !!(ldev->flags & MLX5_LAG_FLAG_SRIOV);
}

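/* Infer the virtual-to-physical TX port mapping from the tracker state:
 * by default each port maps to itself; if a port is down or has TX
 * disabled, both virtual ports are remapped to the other physical port.
 */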
static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
					   u8 *port1, u8 *port2)
{
	*port1 = 1;
	*port2 = 2;
	if (!tracker->netdev_state[MLX5_LAG_P1].tx_enabled ||
	    !tracker->netdev_state[MLX5_LAG_P1].link_up) {
		*port1 = 2;
		return;
	}

	if (!tracker->netdev_state[MLX5_LAG_P2].tx_enabled ||
	    !tracker->netdev_state[MLX5_LAG_P2].link_up)
		*port2 = 1;
}

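/* Recompute the TX affinity mapping from the tracker state and issue a
 * MODIFY_LAG command only if the mapping actually changed.
 */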
void mlx5_modify_lag(struct mlx5_lag *ldev,
		     struct lag_tracker *tracker)
{
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	u8 v2p_port1, v2p_port2;
	int err;

	mlx5_infer_tx_affinity_mapping(tracker, &v2p_port1,
				       &v2p_port2);

	if (v2p_port1 != ldev->v2p_map[MLX5_LAG_P1] ||
	    v2p_port2 != ldev->v2p_map[MLX5_LAG_P2]) {
		ldev->v2p_map[MLX5_LAG_P1] = v2p_port1;
		ldev->v2p_map[MLX5_LAG_P2] = v2p_port2;

		mlx5_core_info(dev0, "modify lag map port 1:%d port 2:%d",
			       ldev->v2p_map[MLX5_LAG_P1],
			       ldev->v2p_map[MLX5_LAG_P2]);

		err = mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2);
		if (err)
			mlx5_core_err(dev0,
				      "Failed to modify LAG (%d)\n",
				      err);
	}
}

static int mlx5_create_lag(struct mlx5_lag *ldev,
			   struct lag_tracker *tracker)
{
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	int err;

	mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[MLX5_LAG_P1],
				       &ldev->v2p_map[MLX5_LAG_P2]);

	mlx5_core_info(dev0, "lag map port 1:%d port 2:%d",
		       ldev->v2p_map[MLX5_LAG_P1], ldev->v2p_map[MLX5_LAG_P2]);

	err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[MLX5_LAG_P1],
				  ldev->v2p_map[MLX5_LAG_P2]);
	if (err)
		mlx5_core_err(dev0,
			      "Failed to create LAG (%d)\n",
			      err);
	return err;
}

int mlx5_activate_lag(struct mlx5_lag *ldev,
		      struct lag_tracker *tracker,
		      u8 flags)
{
	bool roce_lag = !!(flags & MLX5_LAG_FLAG_ROCE);
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	int err;

	err = mlx5_create_lag(ldev, tracker);
	if (err) {
		if (roce_lag) {
			mlx5_core_err(dev0,
				      "Failed to activate RoCE LAG\n");
		} else {
			mlx5_core_err(dev0,
				      "Failed to activate VF LAG\n"
				      "Make sure all VFs are unbound prior to VF LAG activation or deactivation\n");
		}
		return err;
	}

	ldev->flags |= flags;
	return 0;
}

static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
	bool roce_lag = __mlx5_lag_is_roce(ldev);
	int err;

	ldev->flags &= ~MLX5_LAG_MODE_FLAGS;

	MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG);
	err = mlx5_cmd_exec_in(dev0, destroy_lag, in);
	if (err) {
		if (roce_lag) {
			mlx5_core_err(dev0,
				      "Failed to deactivate RoCE LAG; driver restart required\n");
		} else {
			mlx5_core_err(dev0,
				      "Failed to deactivate VF LAG; driver restart required\n"
				      "Make sure all VFs are unbound prior to VF LAG activation or deactivation\n");
		}
	}

	return err;
}

static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
{
	if (!ldev->pf[MLX5_LAG_P1].dev || !ldev->pf[MLX5_LAG_P2].dev)
		return false;

#ifdef CONFIG_MLX5_ESWITCH
	return mlx5_esw_lag_prereq(ldev->pf[MLX5_LAG_P1].dev,
				   ldev->pf[MLX5_LAG_P2].dev);
#else
	return (!mlx5_sriov_is_enabled(ldev->pf[MLX5_LAG_P1].dev) &&
		!mlx5_sriov_is_enabled(ldev->pf[MLX5_LAG_P2].dev));
#endif
}

static void mlx5_lag_add_devices(struct mlx5_lag *ldev)
{
	int i;

	for (i = 0; i < MLX5_MAX_PORTS; i++) {
		if (!ldev->pf[i].dev)
			continue;

		ldev->pf[i].dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
		mlx5_rescan_drivers_locked(ldev->pf[i].dev);
	}
}

static void mlx5_lag_remove_devices(struct mlx5_lag *ldev)
{
	int i;

	for (i = 0; i < MLX5_MAX_PORTS; i++) {
		if (!ldev->pf[i].dev)
			continue;

		ldev->pf[i].dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
		mlx5_rescan_drivers_locked(ldev->pf[i].dev);
	}
}

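/* Core bonding state machine, called with the mlx5 device list lock held:
 * activate LAG (RoCE or SRIOV mode) when the tracker reports a valid bond,
 * update the TX affinity map while already bonded, and deactivate LAG when
 * the bond goes away.
 */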
static void mlx5_do_bond(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
	struct lag_tracker tracker;
	bool do_bond, roce_lag;
	int err;

	if (!mlx5_lag_is_ready(ldev))
		return;

	spin_lock(&lag_lock);
	tracker = ldev->tracker;
	spin_unlock(&lag_lock);

	do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);

	if (do_bond && !__mlx5_lag_is_active(ldev)) {
		roce_lag = !mlx5_sriov_is_enabled(dev0) &&
			   !mlx5_sriov_is_enabled(dev1);

#ifdef CONFIG_MLX5_ESWITCH
		roce_lag &= dev0->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
			    dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE;
#endif

		if (roce_lag)
			mlx5_lag_remove_devices(ldev);

		err = mlx5_activate_lag(ldev, &tracker,
					roce_lag ? MLX5_LAG_FLAG_ROCE :
					MLX5_LAG_FLAG_SRIOV);
		if (err) {
			if (roce_lag)
				mlx5_lag_add_devices(ldev);

			return;
		}

		if (roce_lag) {
			dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
			mlx5_rescan_drivers_locked(dev0);
			mlx5_nic_vport_enable_roce(dev1);
		}
	} else if (do_bond && __mlx5_lag_is_active(ldev)) {
		mlx5_modify_lag(ldev, &tracker);
	} else if (!do_bond && __mlx5_lag_is_active(ldev)) {
		roce_lag = __mlx5_lag_is_roce(ldev);

		if (roce_lag) {
			dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
			mlx5_rescan_drivers_locked(dev0);
			mlx5_nic_vport_disable_roce(dev1);
		}

		err = mlx5_deactivate_lag(ldev);
		if (err)
			return;

		if (roce_lag)
			mlx5_lag_add_devices(ldev);
	}
}

static void mlx5_queue_bond_work(struct mlx5_lag *ldev, unsigned long delay)
{
	queue_delayed_work(ldev->wq, &ldev->bond_work, delay);
}

static void mlx5_do_bond_work(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct mlx5_lag *ldev = container_of(delayed_work, struct mlx5_lag,
					     bond_work);
	int status;

	status = mlx5_dev_list_trylock();
	if (!status) {
		/* 1 sec delay. */
		mlx5_queue_bond_work(ldev, HZ);
		return;
	}

	mlx5_do_bond(ldev);
	mlx5_dev_list_unlock();
}

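/* Handle a CHANGEUPPER event for a bond master that contains our netdevs.
 * Returns 1 if the tracker's bonded state changed (so the bond work needs
 * to be queued), 0 otherwise.
 */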
static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
					 struct lag_tracker *tracker,
					 struct net_device *ndev,
					 struct netdev_notifier_changeupper_info *info)
{
	struct net_device *upper = info->upper_dev, *ndev_tmp;
	struct netdev_lag_upper_info *lag_upper_info = NULL;
	bool is_bonded, is_in_lag, mode_supported;
	int bond_status = 0;
	int num_slaves = 0;
	int idx;

	if (!netif_is_lag_master(upper))
		return 0;

	if (info->linking)
		lag_upper_info = info->upper_info;

	/* The event may still be of interest if the slave does not belong to
	 * us, but is enslaved to a master which has one or more of our netdevs
	 * as slaves (e.g., if a new slave is added to a master that bonds two
	 * of our netdevs, we should unbond).
	 */
	rcu_read_lock();
	for_each_netdev_in_bond_rcu(upper, ndev_tmp) {
		idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
		if (idx >= 0)
			bond_status |= (1 << idx);

		num_slaves++;
	}
	rcu_read_unlock();

	/* None of this lagdev's netdevs are slaves of this master. */
	if (!(bond_status & 0x3))
		return 0;

	if (lag_upper_info)
		tracker->tx_type = lag_upper_info->tx_type;

	/* Determine bonding status:
	 * A device is considered bonded if both its physical ports are slaves
	 * of the same lag master, and only them.
	 */
	is_in_lag = num_slaves == MLX5_MAX_PORTS && bond_status == 0x3;

	if (!mlx5_lag_is_ready(ldev) && is_in_lag) {
		NL_SET_ERR_MSG_MOD(info->info.extack,
				   "Can't activate LAG offload, PF is configured with more than 64 VFs");
		return 0;
	}

	/* Lag mode must be activebackup or hash. */
	mode_supported = tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP ||
			 tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH;

	if (is_in_lag && !mode_supported)
		NL_SET_ERR_MSG_MOD(info->info.extack,
				   "Can't activate LAG offload, TX type isn't supported");

	is_bonded = is_in_lag && mode_supported;
	if (tracker->is_bonded != is_bonded) {
		tracker->is_bonded = is_bonded;
		return 1;
	}

	return 0;
}

static int mlx5_handle_changelowerstate_event(struct mlx5_lag *ldev,
					      struct lag_tracker *tracker,
					      struct net_device *ndev,
					      struct netdev_notifier_changelowerstate_info *info)
{
	struct netdev_lag_lower_state_info *lag_lower_info;
	int idx;

	if (!netif_is_lag_port(ndev))
		return 0;

	idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev);
	if (idx < 0)
		return 0;

	/* This information is used to determine virtual to physical
	 * port mapping.
	 */
	lag_lower_info = info->lower_state_info;
	if (!lag_lower_info)
		return 0;

	tracker->netdev_state[idx] = *lag_lower_info;

	return 1;
}

static int mlx5_lag_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct lag_tracker tracker;
	struct mlx5_lag *ldev;
	int changed = 0;

	if ((event != NETDEV_CHANGEUPPER) && (event != NETDEV_CHANGELOWERSTATE))
		return NOTIFY_DONE;

	ldev = container_of(this, struct mlx5_lag, nb);

	if (!mlx5_lag_is_ready(ldev) && event == NETDEV_CHANGELOWERSTATE)
		return NOTIFY_DONE;

	tracker = ldev->tracker;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		changed = mlx5_handle_changeupper_event(ldev, &tracker, ndev,
							ptr);
		break;
	case NETDEV_CHANGELOWERSTATE:
		changed = mlx5_handle_changelowerstate_event(ldev, &tracker,
							     ndev, ptr);
		break;
	}

	spin_lock(&lag_lock);
	ldev->tracker = tracker;
	spin_unlock(&lag_lock);

	if (changed)
		mlx5_queue_bond_work(ldev, 0);

	return NOTIFY_DONE;
}

static struct mlx5_lag *mlx5_lag_dev_alloc(void)
{
	struct mlx5_lag *ldev;

	ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
	if (!ldev)
		return NULL;

	ldev->wq = create_singlethread_workqueue("mlx5_lag");
	if (!ldev->wq) {
		kfree(ldev);
		return NULL;
	}

	INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);

	return ldev;
}

static void mlx5_lag_dev_free(struct mlx5_lag *ldev)
{
	destroy_workqueue(ldev->wq);
	kfree(ldev);
}

static int mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
			       struct mlx5_core_dev *dev,
			       struct net_device *netdev)
{
	unsigned int fn = PCI_FUNC(dev->pdev->devfn);

	if (fn >= MLX5_MAX_PORTS)
		return -EPERM;

	spin_lock(&lag_lock);
	ldev->pf[fn].dev = dev;
	ldev->pf[fn].netdev = netdev;
	ldev->tracker.netdev_state[fn].link_up = 0;
	ldev->tracker.netdev_state[fn].tx_enabled = 0;

	dev->priv.lag = ldev;

	spin_unlock(&lag_lock);

	return fn;
}

static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
				   struct mlx5_core_dev *dev)
{
	int i;

	for (i = 0; i < MLX5_MAX_PORTS; i++)
		if (ldev->pf[i].dev == dev)
			break;

	if (i == MLX5_MAX_PORTS)
		return;

	spin_lock(&lag_lock);
	memset(&ldev->pf[i], 0, sizeof(*ldev->pf));

	dev->priv.lag = NULL;
	spin_unlock(&lag_lock);
}

/* Must be called with intf_mutex held */
void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
{
	struct mlx5_lag *ldev = NULL;
	struct mlx5_core_dev *tmp_dev;
	int i, err;

	if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
	    !MLX5_CAP_GEN(dev, lag_master) ||
	    MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS)
		return;

	tmp_dev = mlx5_get_next_phys_dev(dev);
	if (tmp_dev)
		ldev = tmp_dev->priv.lag;

	if (!ldev) {
		ldev = mlx5_lag_dev_alloc();
		if (!ldev) {
			mlx5_core_err(dev, "Failed to alloc lag dev\n");
			return;
		}
	}

	if (mlx5_lag_dev_add_pf(ldev, dev, netdev) < 0)
		return;

	for (i = 0; i < MLX5_MAX_PORTS; i++)
		if (!ldev->pf[i].dev)
			break;

	if (i >= MLX5_MAX_PORTS)
		ldev->flags |= MLX5_LAG_FLAG_READY;

	if (!ldev->nb.notifier_call) {
		ldev->nb.notifier_call = mlx5_lag_netdev_event;
		if (register_netdevice_notifier_net(&init_net, &ldev->nb)) {
			ldev->nb.notifier_call = NULL;
			mlx5_core_err(dev, "Failed to register LAG netdev notifier\n");
		}
	}

	err = mlx5_lag_mp_init(ldev);
	if (err)
		mlx5_core_err(dev, "Failed to init multipath lag err=%d\n",
			      err);

	return;
}

/* Must be called with intf_mutex held */
void mlx5_lag_remove(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	int i;

	ldev = mlx5_lag_dev_get(dev);
	if (!ldev)
		return;

	if (__mlx5_lag_is_active(ldev))
		mlx5_deactivate_lag(ldev);

	mlx5_lag_dev_remove_pf(ldev, dev);

	ldev->flags &= ~MLX5_LAG_FLAG_READY;

	for (i = 0; i < MLX5_MAX_PORTS; i++)
		if (ldev->pf[i].dev)
			break;

	if (i == MLX5_MAX_PORTS) {
		if (ldev->nb.notifier_call) {
			unregister_netdevice_notifier_net(&init_net, &ldev->nb);
			ldev->nb.notifier_call = NULL;
		}
		mlx5_lag_mp_cleanup(ldev);
		cancel_delayed_work_sync(&ldev->bond_work);
		mlx5_lag_dev_free(ldev);
	}
}

641
7c34ec19
AH
642bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
643{
644 struct mlx5_lag *ldev;
645 bool res;
646
64363e61 647 spin_lock(&lag_lock);
7c34ec19
AH
648 ldev = mlx5_lag_dev_get(dev);
649 res = ldev && __mlx5_lag_is_roce(ldev);
64363e61 650 spin_unlock(&lag_lock);
7c34ec19
AH
651
652 return res;
653}
654EXPORT_SYMBOL(mlx5_lag_is_roce);
655
7907f23a
AH
656bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
657{
658 struct mlx5_lag *ldev;
659 bool res;
660
64363e61 661 spin_lock(&lag_lock);
7907f23a 662 ldev = mlx5_lag_dev_get(dev);
292612d6 663 res = ldev && __mlx5_lag_is_active(ldev);
64363e61 664 spin_unlock(&lag_lock);
7907f23a
AH
665
666 return res;
667}
668EXPORT_SYMBOL(mlx5_lag_is_active);
669
7c34ec19
AH
670bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev)
671{
672 struct mlx5_lag *ldev;
673 bool res;
674
64363e61 675 spin_lock(&lag_lock);
7c34ec19
AH
676 ldev = mlx5_lag_dev_get(dev);
677 res = ldev && __mlx5_lag_is_sriov(ldev);
64363e61 678 spin_unlock(&lag_lock);
7c34ec19
AH
679
680 return res;
681}
682EXPORT_SYMBOL(mlx5_lag_is_sriov);
683
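/* Take the device list lock and re-run the bonding state machine for this
 * device's lag.
 */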
void mlx5_lag_update(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;

	mlx5_dev_list_lock();
	ldev = mlx5_lag_dev_get(dev);
	if (!ldev)
		goto unlock;

	mlx5_do_bond(ldev);

unlock:
	mlx5_dev_list_unlock();
}

struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
{
	struct net_device *ndev = NULL;
	struct mlx5_lag *ldev;

	spin_lock(&lag_lock);
	ldev = mlx5_lag_dev_get(dev);

	if (!(ldev && __mlx5_lag_is_roce(ldev)))
		goto unlock;

	if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
		ndev = ldev->tracker.netdev_state[MLX5_LAG_P1].tx_enabled ?
		       ldev->pf[MLX5_LAG_P1].netdev :
		       ldev->pf[MLX5_LAG_P2].netdev;
	} else {
		ndev = ldev->pf[MLX5_LAG_P1].netdev;
	}
	if (ndev)
		dev_hold(ndev);

unlock:
	spin_unlock(&lag_lock);

	return ndev;
}
EXPORT_SYMBOL(mlx5_lag_get_roce_netdev);

u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
			   struct net_device *slave)
{
	struct mlx5_lag *ldev;
	u8 port = 0;

	spin_lock(&lag_lock);
	ldev = mlx5_lag_dev_get(dev);
	if (!(ldev && __mlx5_lag_is_roce(ldev)))
		goto unlock;

	if (ldev->pf[MLX5_LAG_P1].netdev == slave)
		port = MLX5_LAG_P1;
	else
		port = MLX5_LAG_P2;

	port = ldev->v2p_map[port];

unlock:
	spin_unlock(&lag_lock);
	return port;
}
EXPORT_SYMBOL(mlx5_lag_get_slave_port);

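/* Query congestion statistics. When RoCE LAG is active the counters are
 * read from both ports and summed into @values; otherwise only this
 * device is queried.
 */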
int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
				 u64 *values,
				 int num_counters,
				 size_t *offsets)
{
	int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out);
	struct mlx5_core_dev *mdev[MLX5_MAX_PORTS];
	struct mlx5_lag *ldev;
	int num_ports;
	int ret, i, j;
	void *out;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	memset(values, 0, sizeof(*values) * num_counters);

	spin_lock(&lag_lock);
	ldev = mlx5_lag_dev_get(dev);
	if (ldev && __mlx5_lag_is_roce(ldev)) {
		num_ports = MLX5_MAX_PORTS;
		mdev[MLX5_LAG_P1] = ldev->pf[MLX5_LAG_P1].dev;
		mdev[MLX5_LAG_P2] = ldev->pf[MLX5_LAG_P2].dev;
	} else {
		num_ports = 1;
		mdev[MLX5_LAG_P1] = dev;
	}
	spin_unlock(&lag_lock);

	for (i = 0; i < num_ports; ++i) {
		u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = {};

		MLX5_SET(query_cong_statistics_in, in, opcode,
			 MLX5_CMD_OP_QUERY_CONG_STATISTICS);
		ret = mlx5_cmd_exec_inout(mdev[i], query_cong_statistics, in,
					  out);
		if (ret)
			goto free;

		for (j = 0; j < num_counters; ++j)
			values[j] += be64_to_cpup((__be64 *)(out + offsets[j]));
	}

free:
	kvfree(out);
	return ret;
}
EXPORT_SYMBOL(mlx5_lag_query_cong_counters);