/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/netdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "lag.h"
#include "lag_mp.h"
/* General purpose, use for short periods of time.
 * Beware of lock dependencies (preferably, no locks should be acquired
 * under it).
 */
static DEFINE_MUTEX(lag_mutex);
static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1,
			       u8 remap_port2)
{
	u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(create_lag_out)] = {0};
	void *lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);

	MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);

	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
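/* Both mlx5_cmd_create_lag() and mlx5_cmd_modify_lag() below program the
 * virtual-to-physical TX port map carried in the lag context. For example,
 * remap_port1 = 1, remap_port2 = 2 is the identity mapping, while
 * remap_port1 = 2, remap_port2 = 2 steers all egress traffic through
 * physical port 2 (e.g., while port 1's link is down).
 */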
static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 remap_port1,
			       u8 remap_port2)
{
	u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(modify_lag_out)] = {0};
	void *lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx);

	MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG);
	/* field_select 0x1: only the tx_remap_affinity fields set below are
	 * being modified.
	 */
	MLX5_SET(modify_lag_in, in, field_select, 0x1);

	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
static int mlx5_cmd_destroy_lag(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(destroy_lag_out)] = {0};

	MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(create_vport_lag_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(create_vport_lag_out)] = {0};

	MLX5_SET(create_vport_lag_in, in, opcode, MLX5_CMD_OP_CREATE_VPORT_LAG);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_cmd_create_vport_lag);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(destroy_vport_lag_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(destroy_vport_lag_out)] = {0};

	MLX5_SET(destroy_vport_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_VPORT_LAG);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag);
static int mlx5_cmd_query_cong_counter(struct mlx5_core_dev *dev,
				       bool reset, void *out, int out_size)
{
	u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = { };

	MLX5_SET(query_cong_statistics_in, in, opcode,
		 MLX5_CMD_OP_QUERY_CONG_STATISTICS);
	MLX5_SET(query_cong_statistics_in, in, clear, reset);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
}
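/* Note that mlx5_cmd_query_cong_counter() clears the counters as a side
 * effect when reset is true; mlx5_lag_query_cong_counters() below always
 * passes reset = false, so the statistics keep accumulating across reads.
 */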
int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
				struct net_device *ndev)
{
	int i;

	for (i = 0; i < MLX5_MAX_PORTS; i++)
		if (ldev->pf[i].netdev == ndev)
			return i;

	return -1;
}
static bool __mlx5_lag_is_roce(struct mlx5_lag *ldev)
{
	return !!(ldev->flags & MLX5_LAG_FLAG_ROCE);
}
static bool __mlx5_lag_is_sriov(struct mlx5_lag *ldev)
{
	return !!(ldev->flags & MLX5_LAG_FLAG_SRIOV);
}
static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
					   u8 *port1, u8 *port2)
{
	*port1 = 1;
	*port2 = 2;
	if (!tracker->netdev_state[0].tx_enabled ||
	    !tracker->netdev_state[0].link_up) {
		*port1 = 2;
		return;
	}

	if (!tracker->netdev_state[1].tx_enabled ||
	    !tracker->netdev_state[1].link_up)
		*port2 = 1;
}
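/* Resulting TX affinity map, where "usable" means tx_enabled && link_up:
 *
 *   port 1 usable | port 2 usable | (*port1, *port2)
 *   --------------+---------------+-----------------
 *        yes      |      yes      |      (1, 2)
 *        no       |      yes      |      (2, 2)
 *        yes      |      no       |      (1, 1)
 *        no       |      no       |      (2, 2)
 *
 * i.e., when one physical port becomes unusable, both virtual ports are
 * remapped onto the remaining one.
 */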
void mlx5_modify_lag(struct mlx5_lag *ldev,
		     struct lag_tracker *tracker)
{
	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
	u8 v2p_port1, v2p_port2;
	int err;

	mlx5_infer_tx_affinity_mapping(tracker, &v2p_port1,
				       &v2p_port2);

	if (v2p_port1 != ldev->v2p_map[0] ||
	    v2p_port2 != ldev->v2p_map[1]) {
		ldev->v2p_map[0] = v2p_port1;
		ldev->v2p_map[1] = v2p_port2;

		mlx5_core_info(dev0, "modify lag map port 1:%d port 2:%d",
			       ldev->v2p_map[0], ldev->v2p_map[1]);

		err = mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2);
		if (err)
			mlx5_core_err(dev0,
				      "Failed to modify LAG (%d)\n",
				      err);
	}
}
static int mlx5_create_lag(struct mlx5_lag *ldev,
			   struct lag_tracker *tracker)
{
	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
	int err;

	mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[0],
				       &ldev->v2p_map[1]);

	mlx5_core_info(dev0, "lag map port 1:%d port 2:%d",
		       ldev->v2p_map[0], ldev->v2p_map[1]);

	err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[0], ldev->v2p_map[1]);
	if (err)
		mlx5_core_err(dev0,
			      "Failed to create LAG (%d)\n",
			      err);
	return err;
}
int mlx5_activate_lag(struct mlx5_lag *ldev,
		      struct lag_tracker *tracker,
		      u8 flags)
{
	bool roce_lag = !!(flags & MLX5_LAG_FLAG_ROCE);
	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
	int err;

	err = mlx5_create_lag(ldev, tracker);
	if (err) {
		if (roce_lag)
			mlx5_core_err(dev0,
				      "Failed to activate RoCE LAG\n");
		else
			mlx5_core_err(dev0,
				      "Failed to activate VF LAG\n"
				      "Make sure all VFs are unbound prior to VF LAG activation or deactivation\n");
		return err;
	}

	ldev->flags |= flags;
	return 0;
}
static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
	bool roce_lag = __mlx5_lag_is_roce(ldev);
	int err;

	ldev->flags &= ~MLX5_LAG_MODE_FLAGS;

	err = mlx5_cmd_destroy_lag(dev0);
	if (err) {
		if (roce_lag)
			mlx5_core_err(dev0,
				      "Failed to deactivate RoCE LAG; driver restart required\n");
		else
			mlx5_core_err(dev0,
				      "Failed to deactivate VF LAG; driver restart required\n"
				      "Make sure all VFs are unbound prior to VF LAG activation or deactivation\n");
	}

	return err;
}
static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
{
	if (!ldev->pf[0].dev || !ldev->pf[1].dev)
		return false;

#ifdef CONFIG_MLX5_ESWITCH
	return mlx5_esw_lag_prereq(ldev->pf[0].dev, ldev->pf[1].dev);
#else
	return (!mlx5_sriov_is_enabled(ldev->pf[0].dev) &&
		!mlx5_sriov_is_enabled(ldev->pf[1].dev));
#endif
}
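/* With the eswitch compiled in, bonding is additionally gated on
 * mlx5_esw_lag_prereq() agreeing that the two eswitches are in compatible
 * modes; otherwise it is simply refused while SR-IOV is enabled on either
 * port.
 */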
static void mlx5_lag_add_ib_devices(struct mlx5_lag *ldev)
{
	int i;

	for (i = 0; i < MLX5_MAX_PORTS; i++)
		if (ldev->pf[i].dev)
			mlx5_add_dev_by_protocol(ldev->pf[i].dev,
						 MLX5_INTERFACE_PROTOCOL_IB);
}
static void mlx5_lag_remove_ib_devices(struct mlx5_lag *ldev)
{
	int i;

	for (i = 0; i < MLX5_MAX_PORTS; i++)
		if (ldev->pf[i].dev)
			mlx5_remove_dev_by_protocol(ldev->pf[i].dev,
						    MLX5_INTERFACE_PROTOCOL_IB);
}
static void mlx5_do_bond(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
	struct mlx5_core_dev *dev1 = ldev->pf[1].dev;
	struct lag_tracker tracker;
	bool do_bond, roce_lag;
	int err;

	if (!dev0 || !dev1)
		return;

	mutex_lock(&lag_mutex);
	tracker = ldev->tracker;
	mutex_unlock(&lag_mutex);

	do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);

	if (do_bond && !__mlx5_lag_is_active(ldev)) {
		roce_lag = !mlx5_sriov_is_enabled(dev0) &&
			   !mlx5_sriov_is_enabled(dev1);

#ifdef CONFIG_MLX5_ESWITCH
		roce_lag &= dev0->priv.eswitch->mode == SRIOV_NONE &&
			    dev1->priv.eswitch->mode == SRIOV_NONE;
#endif

		if (roce_lag)
			mlx5_lag_remove_ib_devices(ldev);

		err = mlx5_activate_lag(ldev, &tracker,
					roce_lag ? MLX5_LAG_FLAG_ROCE :
					MLX5_LAG_FLAG_SRIOV);
		if (err) {
			if (roce_lag)
				mlx5_lag_add_ib_devices(ldev);

			return;
		}

		if (roce_lag) {
			mlx5_add_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
			mlx5_nic_vport_enable_roce(dev1);
		}
	} else if (do_bond && __mlx5_lag_is_active(ldev)) {
		mlx5_modify_lag(ldev, &tracker);
	} else if (!do_bond && __mlx5_lag_is_active(ldev)) {
		roce_lag = __mlx5_lag_is_roce(ldev);

		if (roce_lag) {
			mlx5_remove_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
			mlx5_nic_vport_disable_roce(dev1);
		}

		err = mlx5_deactivate_lag(ldev);
		if (err)
			return;

		if (roce_lag)
			mlx5_lag_add_ib_devices(ldev);
	}
}
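/* mlx5_do_bond() thus drives three transitions:
 *  - not bonded -> bonded:  choose RoCE LAG (no SR-IOV, legacy eswitch
 *    mode on both ports) or VF LAG, activate it, and for RoCE replace
 *    the two per-port IB devices with a single one on top of dev0;
 *  - bonded -> bonded:      only refresh the TX port affinity;
 *  - bonded -> not bonded:  destroy the LAG and restore one IB device
 *    per port.
 */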
static void mlx5_queue_bond_work(struct mlx5_lag *ldev, unsigned long delay)
{
	queue_delayed_work(ldev->wq, &ldev->bond_work, delay);
}
static void mlx5_do_bond_work(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct mlx5_lag *ldev = container_of(delayed_work, struct mlx5_lag,
					     bond_work);
	int status;

	status = mlx5_dev_list_trylock();
	if (!status) {
		/* 1 sec delay. */
		mlx5_queue_bond_work(ldev, HZ);
		return;
	}

	mlx5_do_bond(ldev);
	mlx5_dev_list_unlock();
}
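/* The work item needs the global mlx5 device-list lock; rather than block
 * the workqueue when the lock is contended, it reschedules itself one
 * second (HZ jiffies) into the future and tries again.
 */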
static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
					 struct lag_tracker *tracker,
					 struct net_device *ndev,
					 struct netdev_notifier_changeupper_info *info)
{
	struct net_device *upper = info->upper_dev, *ndev_tmp;
	struct netdev_lag_upper_info *lag_upper_info = NULL;
	bool is_bonded;
	int bond_status = 0;
	int num_slaves = 0;
	int idx;

	if (!netif_is_lag_master(upper))
		return 0;

	if (info->linking)
		lag_upper_info = info->upper_info;

	/* The event may still be of interest if the slave does not belong to
	 * us, but is enslaved to a master which has one or more of our netdevs
	 * as slaves (e.g., if a new slave is added to a master that bonds two
	 * of our netdevs, we should unbond).
	 */
	rcu_read_lock();
	for_each_netdev_in_bond_rcu(upper, ndev_tmp) {
		idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
		if (idx > -1)
			bond_status |= (1 << idx);

		num_slaves++;
	}
	rcu_read_unlock();

	/* None of this lagdev's netdevs are slaves of this master. */
	if (!(bond_status & 0x3))
		return 0;

	if (lag_upper_info)
		tracker->tx_type = lag_upper_info->tx_type;

	/* Determine bonding status:
	 * A device is considered bonded if both its physical ports are slaves
	 * of the same lag master, and only them.
	 * Lag mode must be activebackup or hash.
	 */
	is_bonded = (num_slaves == MLX5_MAX_PORTS) &&
		    (bond_status == 0x3) &&
		    ((tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) ||
		     (tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH));

	if (tracker->is_bonded != is_bonded) {
		tracker->is_bonded = is_bonded;
		return 1;
	}

	return 0;
}
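/* Example of the bond_status bookkeeping above: if our two netdevs are the
 * only slaves of the upper device, bond_status ends up as 0x3 (one bit per
 * netdev index) and num_slaves == 2 == MLX5_MAX_PORTS, so the tracker
 * reports bonded, provided the TX policy is activebackup or hash.
 */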
static int mlx5_handle_changelowerstate_event(struct mlx5_lag *ldev,
					      struct lag_tracker *tracker,
					      struct net_device *ndev,
					      struct netdev_notifier_changelowerstate_info *info)
{
	struct netdev_lag_lower_state_info *lag_lower_info;
	int idx;

	if (!netif_is_lag_port(ndev))
		return 0;

	idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev);
	if (idx == -1)
		return 0;

	/* This information is used to determine virtual to physical
	 * port mapping.
	 */
	lag_lower_info = info->lower_state_info;
	if (!lag_lower_info)
		return 0;

	tracker->netdev_state[idx] = *lag_lower_info;

	return 1;
}
static int mlx5_lag_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct lag_tracker tracker;
	struct mlx5_lag *ldev;
	int changed = 0;

	if (!net_eq(dev_net(ndev), &init_net))
		return NOTIFY_DONE;

	if ((event != NETDEV_CHANGEUPPER) && (event != NETDEV_CHANGELOWERSTATE))
		return NOTIFY_DONE;

	ldev    = container_of(this, struct mlx5_lag, nb);
	tracker = ldev->tracker;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		changed = mlx5_handle_changeupper_event(ldev, &tracker, ndev,
							ptr);
		break;
	case NETDEV_CHANGELOWERSTATE:
		changed = mlx5_handle_changelowerstate_event(ldev, &tracker,
							     ndev, ptr);
		break;
	}

	mutex_lock(&lag_mutex);
	ldev->tracker = tracker;
	mutex_unlock(&lag_mutex);

	if (changed)
		mlx5_queue_bond_work(ldev, 0);

	return NOTIFY_DONE;
}
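/* The handlers above work on a stack copy of the tracker, which is then
 * published back under lag_mutex; readers such as
 * mlx5_lag_get_roce_netdev() therefore never observe a half-updated state.
 * The actual device reprogramming is deferred to the workqueue.
 */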
static struct mlx5_lag *mlx5_lag_dev_alloc(void)
{
	struct mlx5_lag *ldev;

	ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
	if (!ldev)
		return NULL;

	ldev->wq = create_singlethread_workqueue("mlx5_lag");
	if (!ldev->wq) {
		kfree(ldev);
		return NULL;
	}

	INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);

	return ldev;
}
static void mlx5_lag_dev_free(struct mlx5_lag *ldev)
{
	destroy_workqueue(ldev->wq);
	kfree(ldev);
}
static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
				struct mlx5_core_dev *dev,
				struct net_device *netdev)
{
	unsigned int fn = PCI_FUNC(dev->pdev->devfn);

	if (fn >= MLX5_MAX_PORTS)
		return;

	mutex_lock(&lag_mutex);
	ldev->pf[fn].dev    = dev;
	ldev->pf[fn].netdev = netdev;
	ldev->tracker.netdev_state[fn].link_up = 0;
	ldev->tracker.netdev_state[fn].tx_enabled = 0;

	dev->priv.lag = ldev;

	mutex_unlock(&lag_mutex);
}
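/* Note the PCI function number doubles as the port index here, which
 * assumes the two ports of a bondable device are exposed as functions 0
 * and 1 of the same PCI device.
 */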
static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
				   struct mlx5_core_dev *dev)
{
	int i;

	for (i = 0; i < MLX5_MAX_PORTS; i++)
		if (ldev->pf[i].dev == dev)
			break;

	if (i == MLX5_MAX_PORTS)
		return;

	mutex_lock(&lag_mutex);
	memset(&ldev->pf[i], 0, sizeof(*ldev->pf));

	dev->priv.lag = NULL;
	mutex_unlock(&lag_mutex);
}
/* Must be called with intf_mutex held */
void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
{
	struct mlx5_lag *ldev = NULL;
	struct mlx5_core_dev *tmp_dev;
	int err;

	if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
	    !MLX5_CAP_GEN(dev, lag_master) ||
	    (MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS))
		return;

	tmp_dev = mlx5_get_next_phys_dev(dev);
	if (tmp_dev)
		ldev = tmp_dev->priv.lag;

	if (!ldev) {
		ldev = mlx5_lag_dev_alloc();
		if (!ldev) {
			mlx5_core_err(dev, "Failed to alloc lag dev\n");
			return;
		}
	}

	mlx5_lag_dev_add_pf(ldev, dev, netdev);

	if (!ldev->nb.notifier_call) {
		ldev->nb.notifier_call = mlx5_lag_netdev_event;
		if (register_netdevice_notifier(&ldev->nb)) {
			ldev->nb.notifier_call = NULL;
			mlx5_core_err(dev, "Failed to register LAG netdev notifier\n");
		}
	}

	err = mlx5_lag_mp_init(ldev);
	if (err)
		mlx5_core_err(dev, "Failed to init multipath lag err=%d\n",
			      err);
}
int mlx5_lag_get_pf_num(struct mlx5_core_dev *dev, int *pf_num)
{
	struct mlx5_lag *ldev;
	int n;

	ldev = mlx5_lag_dev_get(dev);
	if (!ldev) {
		mlx5_core_warn(dev, "no lag device, can't get pf num\n");
		return -EINVAL;
	}

	for (n = 0; n < MLX5_MAX_PORTS; n++)
		if (ldev->pf[n].dev == dev) {
			*pf_num = n;
			return 0;
		}

	mlx5_core_warn(dev, "wasn't able to locate pf in the lag device\n");
	return -EINVAL;
}
/* Must be called with intf_mutex held */
void mlx5_lag_remove(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	int i;

	ldev = mlx5_lag_dev_get(dev);
	if (!ldev)
		return;

	if (__mlx5_lag_is_active(ldev))
		mlx5_deactivate_lag(ldev);

	mlx5_lag_dev_remove_pf(ldev, dev);

	for (i = 0; i < MLX5_MAX_PORTS; i++)
		if (ldev->pf[i].dev)
			break;

	if (i == MLX5_MAX_PORTS) {
		if (ldev->nb.notifier_call)
			unregister_netdevice_notifier(&ldev->nb);
		mlx5_lag_mp_cleanup(ldev);
		cancel_delayed_work_sync(&ldev->bond_work);
		mlx5_lag_dev_free(ldev);
	}
}
bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	bool res;

	mutex_lock(&lag_mutex);
	ldev = mlx5_lag_dev_get(dev);
	res  = ldev && __mlx5_lag_is_roce(ldev);
	mutex_unlock(&lag_mutex);

	return res;
}
EXPORT_SYMBOL(mlx5_lag_is_roce);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	bool res;

	mutex_lock(&lag_mutex);
	ldev = mlx5_lag_dev_get(dev);
	res  = ldev && __mlx5_lag_is_active(ldev);
	mutex_unlock(&lag_mutex);

	return res;
}
EXPORT_SYMBOL(mlx5_lag_is_active);
bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	bool res;

	mutex_lock(&lag_mutex);
	ldev = mlx5_lag_dev_get(dev);
	res  = ldev && __mlx5_lag_is_sriov(ldev);
	mutex_unlock(&lag_mutex);

	return res;
}
EXPORT_SYMBOL(mlx5_lag_is_sriov);
void mlx5_lag_update(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;

	mlx5_dev_list_lock();
	ldev = mlx5_lag_dev_get(dev);
	if (!ldev)
		goto unlock;

	mlx5_do_bond(ldev);

unlock:
	mlx5_dev_list_unlock();
}
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
{
	struct net_device *ndev = NULL;
	struct mlx5_lag *ldev;

	mutex_lock(&lag_mutex);
	ldev = mlx5_lag_dev_get(dev);

	if (!(ldev && __mlx5_lag_is_roce(ldev)))
		goto unlock;

	if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
		ndev = ldev->tracker.netdev_state[0].tx_enabled ?
		       ldev->pf[0].netdev : ldev->pf[1].netdev;
	} else {
		ndev = ldev->pf[0].netdev;
	}
	if (ndev)
		dev_hold(ndev);

unlock:
	mutex_unlock(&lag_mutex);

	return ndev;
}
EXPORT_SYMBOL(mlx5_lag_get_roce_netdev);
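/* mlx5_lag_get_roce_netdev() returns the currently active netdev with a
 * reference held (dev_hold()); callers are expected to drop it with
 * dev_put() when they are done.
 */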
bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev,
						 priv);
	struct mlx5_lag *ldev;

	if (intf->protocol != MLX5_INTERFACE_PROTOCOL_IB)
		return true;

	ldev = mlx5_lag_dev_get(dev);
	if (!ldev || !__mlx5_lag_is_roce(ldev) || ldev->pf[0].dev == dev)
		return true;

	/* If bonded, we do not add an IB device for PF1. */
	return false;
}
int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
				 u64 *values,
				 int num_counters,
				 size_t *offsets)
{
	int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out);
	struct mlx5_core_dev *mdev[MLX5_MAX_PORTS];
	struct mlx5_lag *ldev;
	int num_ports;
	int ret, i, j;
	void *out;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	memset(values, 0, sizeof(*values) * num_counters);

	mutex_lock(&lag_mutex);
	ldev = mlx5_lag_dev_get(dev);
	if (ldev && __mlx5_lag_is_roce(ldev)) {
		num_ports = MLX5_MAX_PORTS;
		mdev[0] = ldev->pf[0].dev;
		mdev[1] = ldev->pf[1].dev;
	} else {
		num_ports = 1;
		mdev[0] = dev;
	}

	for (i = 0; i < num_ports; ++i) {
		ret = mlx5_cmd_query_cong_counter(mdev[i], false, out, outlen);
		if (ret)
			goto unlock;

		for (j = 0; j < num_counters; ++j)
			values[j] += be64_to_cpup((__be64 *)(out + offsets[j]));
	}

unlock:
	mutex_unlock(&lag_mutex);
	kvfree(out);
	return ret;
}
EXPORT_SYMBOL(mlx5_lag_query_cong_counters);