/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 */
#include "cna.h"

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>

#include "bna.h"

#include "bnad.h"
#define BNAD_NUM_TXF_COUNTERS 12
#define BNAD_NUM_RXF_COUNTERS 10
#define BNAD_NUM_CQ_COUNTERS (3 + 5)
#define BNAD_NUM_RXQ_COUNTERS 6
#define BNAD_NUM_TXQ_COUNTERS 5
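
/*
 * Number of u64 counters in the fixed part of the ethtool stats block:
 * the rtnl_link_stats64 mirror, the driver-level counters, and the
 * per-port hardware counters that precede the per-function rxf blocks.
 */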
#define BNAD_ETHTOOL_STATS_NUM						\
	(sizeof(struct rtnl_link_stats64) / sizeof(u64) +		\
	 sizeof(struct bnad_drv_stats) / sizeof(u64) +			\
	 offsetof(struct bfi_enet_stats, rxf_stats[0]) / sizeof(u64))
static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
	"tx_heartbeat_errors",
	"netif_queue_stopped",
	"tx_skb_mss_too_long",
	"tx_skb_tso_too_short",
	"tx_skb_non_tso_too_long",
	"tx_skb_headlen_too_long",
	"tx_skb_headlen_zero",
	"tx_skb_len_mismatch",
	"rxp_info_alloc_failed",
	"mbox_intr_disabled",
	"tx_unmap_q_alloc_failed",
	"rx_unmap_q_alloc_failed",
	"rxbuf_alloc_failed",
	"mac_frame_512_1023",
	"mac_frame_1024_1518",
	"mac_frame_1518_1522",
	"mac_rx_control_frames",
	"mac_rx_unknown_opcode",
	"mac_rx_alignment_error",
	"mac_rx_frame_length_error",
	"mac_rx_carrier_sense_error",
	"mac_tx_excessive_deferral",
	"mac_tx_single_collision",
	"mac_tx_muliple_collision",
	"mac_tx_late_collision",
	"mac_tx_excessive_collision",
	"mac_tx_total_collision",
	"mac_tx_pause_honored",
	"mac_tx_control_frame",
	"bpc_tx_zero_pause_0",
	"bpc_tx_zero_pause_1",
	"bpc_tx_zero_pause_2",
	"bpc_tx_zero_pause_3",
	"bpc_tx_zero_pause_4",
	"bpc_tx_zero_pause_5",
	"bpc_tx_zero_pause_6",
	"bpc_tx_zero_pause_7",
	"bpc_tx_first_pause_0",
	"bpc_tx_first_pause_1",
	"bpc_tx_first_pause_2",
	"bpc_tx_first_pause_3",
	"bpc_tx_first_pause_4",
	"bpc_tx_first_pause_5",
	"bpc_tx_first_pause_6",
	"bpc_tx_first_pause_7",
	"bpc_rx_zero_pause_0",
	"bpc_rx_zero_pause_1",
	"bpc_rx_zero_pause_2",
	"bpc_rx_zero_pause_3",
	"bpc_rx_zero_pause_4",
	"bpc_rx_zero_pause_5",
	"bpc_rx_zero_pause_6",
	"bpc_rx_zero_pause_7",
	"bpc_rx_first_pause_0",
	"bpc_rx_first_pause_1",
	"bpc_rx_first_pause_2",
	"bpc_rx_first_pause_3",
	"bpc_rx_first_pause_4",
	"bpc_rx_first_pause_5",
	"bpc_rx_first_pause_6",
	"bpc_rx_first_pause_7",
	"rad_rx_vlan_frames",
	"rad_rx_ucast_octets",
	"rad_rx_mcast_octets",
	"rad_rx_bcast_octets",
	"rlb_rad_rx_vlan_frames",
	"rlb_rad_rx_ucast_octets",
	"rlb_rad_rx_ucast_vlan",
	"rlb_rad_rx_mcast_octets",
	"rlb_rad_rx_mcast_vlan",
	"rlb_rad_rx_bcast_octets",
	"rlb_rad_rx_bcast_vlan",
	"fc_rx_ucast_octets",
	"fc_rx_mcast_octets",
	"fc_rx_bcast_octets",
	"fc_tx_ucast_octets",
	"fc_tx_mcast_octets",
	"fc_tx_bcast_octets",
	"fc_tx_parity_errors",
	"fc_tx_fid_parity_errors",
};
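
/*
 * Report the fixed 10G full-duplex fibre link; speed and duplex are only
 * reported as valid while the carrier is up.
 */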
static int
bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	cmd->supported = SUPPORTED_10000baseT_Full;
	cmd->advertising = ADVERTISED_10000baseT_Full;
	cmd->autoneg = AUTONEG_DISABLE;
	cmd->supported |= SUPPORTED_FIBRE;
	cmd->advertising |= ADVERTISED_FIBRE;
	cmd->port = PORT_FIBRE;
	cmd->phy_address = 0;

	if (netif_carrier_ok(netdev)) {
		ethtool_cmd_speed_set(cmd, SPEED_10000);
		cmd->duplex = DUPLEX_FULL;
	} else {
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		cmd->duplex = DUPLEX_UNKNOWN;
	}
	cmd->transceiver = XCVR_EXTERNAL;

	return 0;
}
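
/*
 * Only the fixed 10G full-duplex setting is accepted; autonegotiation and
 * any other speed/duplex combination are rejected.
 */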
static int
bnad_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	/* 10G full duplex setting supported only */
	if (cmd->autoneg == AUTONEG_ENABLE)
		return -EOPNOTSUPP;

	if (ethtool_cmd_speed(cmd) == SPEED_10000 &&
	    cmd->duplex == DUPLEX_FULL)
		return 0;

	return -EOPNOTSUPP;
}
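
/*
 * Fill in driver name/version and, when the IOC attributes can be allocated
 * and read under bna_lock, the firmware version; always report PCI bus info.
 */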
static void
bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct bnad *bnad = netdev_priv(netdev);
	struct bfa_ioc_attr *ioc_attr;
	unsigned long flags;

	strlcpy(drvinfo->driver, BNAD_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, BNAD_VERSION, sizeof(drvinfo->version));

	ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
	if (ioc_attr) {
		spin_lock_irqsave(&bnad->bna_lock, flags);
		bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, ioc_attr);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);

		strlcpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
			sizeof(drvinfo->fw_version));
		kfree(ioc_attr);
	}

	strlcpy(drvinfo->bus_info, pci_name(bnad->pcidev),
		sizeof(drvinfo->bus_info));
}
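
/* Wake-on-LAN is not supported by this adapter. */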
static void
bnad_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wolinfo)
{
	wolinfo->supported = 0;
	wolinfo->wolopts = 0;
}
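
/*
 * Report the current interrupt coalescing settings; the timers are kept in
 * BFI_COALESCING_TIMER_UNIT ticks internally and converted to microseconds
 * for ethtool.
 */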
static int
bnad_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	/* Lock required to access bnad->cfg_flags */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	coalesce->use_adaptive_rx_coalesce =
		(bnad->cfg_flags & BNAD_CF_DIM_ENABLED) ? true : false;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	coalesce->rx_coalesce_usecs = bnad->rx_coalescing_timeo *
					BFI_COALESCING_TIMER_UNIT;
	coalesce->tx_coalesce_usecs = bnad->tx_coalescing_timeo *
					BFI_COALESCING_TIMER_UNIT;
	coalesce->tx_max_coalesced_frames = BFI_TX_INTERPKT_COUNT;

	return 0;
}
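
/*
 * Validate the requested coalescing values, switch dynamic interrupt
 * moderation (DIM) on or off, and program the new rx/tx coalescing timers.
 */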
static int
bnad_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;
	int to_del = 0;

	if (coalesce->rx_coalesce_usecs == 0 ||
	    coalesce->rx_coalesce_usecs >
	    BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
		return -EINVAL;

	if (coalesce->tx_coalesce_usecs == 0 ||
	    coalesce->tx_coalesce_usecs >
	    BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
		return -EINVAL;

	mutex_lock(&bnad->conf_mutex);
	/*
	 * No need to store rx_coalesce_usecs here: whenever DIM is
	 * disabled we can get it back from the stack.
	 */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (coalesce->use_adaptive_rx_coalesce) {
		if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED)) {
			bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
			bnad_dim_timer_start(bnad);
		}
	} else {
		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) {
			bnad->cfg_flags &= ~BNAD_CF_DIM_ENABLED;
			/* DIM was just disabled; stop its timer if running */
			if (test_bit(BNAD_RF_DIM_TIMER_RUNNING,
				     &bnad->run_flags)) {
				clear_bit(BNAD_RF_DIM_TIMER_RUNNING,
					  &bnad->run_flags);
				to_del = 1;
			}
			spin_unlock_irqrestore(&bnad->bna_lock, flags);
			if (to_del)
				del_timer_sync(&bnad->dim_timer);
			spin_lock_irqsave(&bnad->bna_lock, flags);
			bnad_rx_coalescing_timeo_set(bnad);
		}
	}
	if (bnad->tx_coalescing_timeo != coalesce->tx_coalesce_usecs /
					BFI_COALESCING_TIMER_UNIT) {
		bnad->tx_coalescing_timeo = coalesce->tx_coalesce_usecs /
						BFI_COALESCING_TIMER_UNIT;
		bnad_tx_coalescing_timeo_set(bnad);
	}

	if (bnad->rx_coalescing_timeo != coalesce->rx_coalesce_usecs /
					BFI_COALESCING_TIMER_UNIT) {
		bnad->rx_coalescing_timeo = coalesce->rx_coalesce_usecs /
						BFI_COALESCING_TIMER_UNIT;

		if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED))
			bnad_rx_coalescing_timeo_set(bnad);
	}

	/* Add Tx Inter-pkt DMA count? */

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);
	return 0;
}
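
/* Report the supported and currently configured rx/tx ring depths. */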
static void
bnad_get_ringparam(struct net_device *netdev,
		   struct ethtool_ringparam *ringparam)
{
	struct bnad *bnad = netdev_priv(netdev);

	ringparam->rx_max_pending = BNAD_MAX_RXQ_DEPTH;
	ringparam->tx_max_pending = BNAD_MAX_TXQ_DEPTH;

	ringparam->rx_pending = bnad->rxq_depth;
	ringparam->tx_pending = bnad->txq_depth;
}
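
/*
 * Change the rx/tx ring depths. Depths must be within the supported range
 * and a power of two; the affected queues are torn down and re-created,
 * and the rx configuration is restored afterwards.
 */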
static int
bnad_set_ringparam(struct net_device *netdev,
		   struct ethtool_ringparam *ringparam)
{
	int i, current_err, err = 0;
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	mutex_lock(&bnad->conf_mutex);
	if (ringparam->rx_pending == bnad->rxq_depth &&
	    ringparam->tx_pending == bnad->txq_depth) {
		mutex_unlock(&bnad->conf_mutex);
		return 0;
	}

	if (ringparam->rx_pending < BNAD_MIN_Q_DEPTH ||
	    ringparam->rx_pending > BNAD_MAX_RXQ_DEPTH ||
	    !is_power_of_2(ringparam->rx_pending)) {
		mutex_unlock(&bnad->conf_mutex);
		return -EINVAL;
	}
	if (ringparam->tx_pending < BNAD_MIN_Q_DEPTH ||
	    ringparam->tx_pending > BNAD_MAX_TXQ_DEPTH ||
	    !is_power_of_2(ringparam->tx_pending)) {
		mutex_unlock(&bnad->conf_mutex);
		return -EINVAL;
	}

	if (ringparam->rx_pending != bnad->rxq_depth) {
		bnad->rxq_depth = ringparam->rx_pending;
		if (!netif_running(netdev)) {
			mutex_unlock(&bnad->conf_mutex);
			return 0;
		}

		for (i = 0; i < bnad->num_rx; i++) {
			if (!bnad->rx_info[i].rx)
				continue;
			bnad_destroy_rx(bnad, i);
			current_err = bnad_setup_rx(bnad, i);
			if (current_err && !err)
				err = current_err;
		}

		if (!err && bnad->rx_info[0].rx) {
			/* restore rx configuration */
			bnad_restore_vlans(bnad, 0);
			bnad_enable_default_bcast(bnad);
			spin_lock_irqsave(&bnad->bna_lock, flags);
			bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
			spin_unlock_irqrestore(&bnad->bna_lock, flags);
			bnad->cfg_flags &= ~(BNAD_CF_ALLMULTI |
					     BNAD_CF_PROMISC);
			bnad_set_rx_mode(netdev);
		}
	}
	if (ringparam->tx_pending != bnad->txq_depth) {
		bnad->txq_depth = ringparam->tx_pending;
		if (!netif_running(netdev)) {
			mutex_unlock(&bnad->conf_mutex);
			return 0;
		}

		for (i = 0; i < bnad->num_tx; i++) {
			if (!bnad->tx_info[i].tx)
				continue;
			bnad_destroy_tx(bnad, i);
			current_err = bnad_setup_tx(bnad, i);
			if (current_err && !err)
				err = current_err;
		}
	}

	mutex_unlock(&bnad->conf_mutex);
	return err;
}
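
/*
 * Pause-frame configuration lives in the enet object and is accessed under
 * bna_lock; autonegotiated pause is not supported.
 */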
static void
bnad_get_pauseparam(struct net_device *netdev,
		    struct ethtool_pauseparam *pauseparam)
{
	struct bnad *bnad = netdev_priv(netdev);

	pauseparam->autoneg = 0;
	pauseparam->rx_pause = bnad->bna.enet.pause_config.rx_pause;
	pauseparam->tx_pause = bnad->bna.enet.pause_config.tx_pause;
}
static int
bnad_set_pauseparam(struct net_device *netdev,
		    struct ethtool_pauseparam *pauseparam)
{
	struct bnad *bnad = netdev_priv(netdev);
	struct bna_pause_config pause_config;
	unsigned long flags;

	if (pauseparam->autoneg == AUTONEG_ENABLE)
		return -EINVAL;

	mutex_lock(&bnad->conf_mutex);
	if (pauseparam->rx_pause != bnad->bna.enet.pause_config.rx_pause ||
	    pauseparam->tx_pause != bnad->bna.enet.pause_config.tx_pause) {
		pause_config.rx_pause = pauseparam->rx_pause;
		pause_config.tx_pause = pauseparam->tx_pause;
		spin_lock_irqsave(&bnad->bna_lock, flags);
		bna_enet_pause_config(&bnad->bna.enet, &pause_config);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
	}
	mutex_unlock(&bnad->conf_mutex);
	return 0;
}
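
/*
 * Build the ETH_SS_STATS string table: the fixed driver/hardware counter
 * names followed by per-txf, per-rxf, per-CQ, per-rxq and per-txq entries
 * for every active queue. The layout must match bnad_get_ethtool_stats().
 */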
static void
bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
{
	struct bnad *bnad = netdev_priv(netdev);
	int i, j, q_num;
	u32 bmap;

	mutex_lock(&bnad->conf_mutex);

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) {
			BUG_ON(!(strlen(bnad_net_stats_strings[i]) <
				 ETH_GSTRING_LEN));
			memcpy(string, bnad_net_stats_strings[i],
			       ETH_GSTRING_LEN);
			string += ETH_GSTRING_LEN;
		}
		bmap = bna_tx_rid_mask(&bnad->bna);
		for (i = 0; bmap; i++) {
			if (bmap & 1) {
				sprintf(string, "txf%d_ucast_octets", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_ucast", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_ucast_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_mcast_octets", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_mcast", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_mcast_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_bcast_octets", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_bcast", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_bcast_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_errors", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_filter_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_filter_mac_sa", i);
				string += ETH_GSTRING_LEN;
			}
			bmap >>= 1;
		}

		bmap = bna_rx_rid_mask(&bnad->bna);
		for (i = 0; bmap; i++) {
			if (bmap & 1) {
				sprintf(string, "rxf%d_ucast_octets", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_ucast", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_ucast_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_mcast_octets", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_mcast", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_mcast_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_bcast_octets", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_bcast", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_bcast_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_frame_drops", i);
				string += ETH_GSTRING_LEN;
			}
			bmap >>= 1;
		}

		q_num = 0;
		for (i = 0; i < bnad->num_rx; i++) {
			if (!bnad->rx_info[i].rx)
				continue;
			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
				sprintf(string, "cq%d_producer_index", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "cq%d_consumer_index", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "cq%d_hw_producer_index",
					q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "cq%d_intr", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "cq%d_poll", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "cq%d_schedule", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "cq%d_keep_poll", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "cq%d_complete", q_num);
				string += ETH_GSTRING_LEN;
				q_num++;
			}
		}

		q_num = 0;
		for (i = 0; i < bnad->num_rx; i++) {
			if (!bnad->rx_info[i].rx)
				continue;
			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
				sprintf(string, "rxq%d_packets", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxq%d_bytes", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxq%d_packets_with_error",
					q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxq%d_allocbuf_failed", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxq%d_producer_index", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxq%d_consumer_index", q_num);
				string += ETH_GSTRING_LEN;
				q_num++;
				if (bnad->rx_info[i].rx_ctrl[j].ccb &&
				    bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
				    bnad->rx_info[i].rx_ctrl[j].ccb->
				    rcb[1]->rxq) {
					sprintf(string, "rxq%d_packets", q_num);
					string += ETH_GSTRING_LEN;
					sprintf(string, "rxq%d_bytes", q_num);
					string += ETH_GSTRING_LEN;
					sprintf(string,
						"rxq%d_packets_with_error",
						q_num);
					string += ETH_GSTRING_LEN;
					sprintf(string, "rxq%d_allocbuf_failed",
						q_num);
					string += ETH_GSTRING_LEN;
					sprintf(string, "rxq%d_producer_index",
						q_num);
					string += ETH_GSTRING_LEN;
					sprintf(string, "rxq%d_consumer_index",
						q_num);
					string += ETH_GSTRING_LEN;
					q_num++;
				}
			}
		}

		q_num = 0;
		for (i = 0; i < bnad->num_tx; i++) {
			if (!bnad->tx_info[i].tx)
				continue;
			for (j = 0; j < bnad->num_txq_per_tx; j++) {
				sprintf(string, "txq%d_packets", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txq%d_bytes", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txq%d_producer_index", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txq%d_consumer_index", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txq%d_hw_consumer_index",
					q_num);
				string += ETH_GSTRING_LEN;
				q_num++;
			}
		}
		break;

	default:
		break;
	}

	mutex_unlock(&bnad->conf_mutex);
}
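
/*
 * Count how many u64 counters bnad_get_ethtool_stats() will emit for the
 * current queue configuration; must stay in sync with bnad_get_strings().
 */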
static int
bnad_get_stats_count_locked(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	int i, j, count = 0, rxf_active_num = 0, txf_active_num = 0;
	u32 bmap;

	bmap = bna_tx_rid_mask(&bnad->bna);
	for (i = 0; bmap; i++) {
		if (bmap & 1)
			txf_active_num++;
		bmap >>= 1;
	}
	bmap = bna_rx_rid_mask(&bnad->bna);
	for (i = 0; bmap; i++) {
		if (bmap & 1)
			rxf_active_num++;
		bmap >>= 1;
	}
	count = BNAD_ETHTOOL_STATS_NUM +
		txf_active_num * BNAD_NUM_TXF_COUNTERS +
		rxf_active_num * BNAD_NUM_RXF_COUNTERS;

	for (i = 0; i < bnad->num_rx; i++) {
		if (!bnad->rx_info[i].rx)
			continue;
		count += bnad->num_rxp_per_rx * BNAD_NUM_CQ_COUNTERS;
		count += bnad->num_rxp_per_rx * BNAD_NUM_RXQ_COUNTERS;
		for (j = 0; j < bnad->num_rxp_per_rx; j++)
			if (bnad->rx_info[i].rx_ctrl[j].ccb &&
			    bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
			    bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq)
				count += BNAD_NUM_RXQ_COUNTERS;
	}

	for (i = 0; i < bnad->num_tx; i++) {
		if (!bnad->tx_info[i].tx)
			continue;
		count += bnad->num_txq_per_tx * BNAD_NUM_TXQ_COUNTERS;
	}
	return count;
}
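
/*
 * Append the per-CQ, per-rxq and per-txq counters to the ethtool stats
 * buffer, starting at index bi, and return the updated index.
 */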
static int
bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi)
{
	int i, j;
	struct bna_rcb *rcb = NULL;
	struct bna_tcb *tcb = NULL;

	for (i = 0; i < bnad->num_rx; i++) {
		if (!bnad->rx_info[i].rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++)
			if (bnad->rx_info[i].rx_ctrl[j].ccb &&
			    bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
			    bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0]->rxq) {
				buf[bi++] = bnad->rx_info[i].rx_ctrl[j].
						ccb->producer_index;
				buf[bi++] = 0; /* ccb->consumer_index */
				buf[bi++] = *(bnad->rx_info[i].rx_ctrl[j].
						ccb->hw_producer_index);

				buf[bi++] = bnad->rx_info[i].
						rx_ctrl[j].rx_intr_ctr;
				buf[bi++] = bnad->rx_info[i].
						rx_ctrl[j].rx_poll_ctr;
				buf[bi++] = bnad->rx_info[i].
						rx_ctrl[j].rx_schedule;
				buf[bi++] = bnad->rx_info[i].
						rx_ctrl[j].rx_keep_poll;
				buf[bi++] = bnad->rx_info[i].
						rx_ctrl[j].rx_complete;
			}
	}
	for (i = 0; i < bnad->num_rx; i++) {
		if (!bnad->rx_info[i].rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++)
			if (bnad->rx_info[i].rx_ctrl[j].ccb) {
				if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
				    bnad->rx_info[i].rx_ctrl[j].ccb->
				    rcb[0]->rxq) {
					rcb = bnad->rx_info[i].rx_ctrl[j].
							ccb->rcb[0];
					buf[bi++] = rcb->rxq->rx_packets;
					buf[bi++] = rcb->rxq->rx_bytes;
					buf[bi++] = rcb->rxq->
							rx_packets_with_error;
					buf[bi++] = rcb->rxq->
							rxbuf_alloc_failed;
					buf[bi++] = rcb->producer_index;
					buf[bi++] = rcb->consumer_index;
				}
				if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
				    bnad->rx_info[i].rx_ctrl[j].ccb->
				    rcb[1]->rxq) {
					rcb = bnad->rx_info[i].rx_ctrl[j].
							ccb->rcb[1];
					buf[bi++] = rcb->rxq->rx_packets;
					buf[bi++] = rcb->rxq->rx_bytes;
					buf[bi++] = rcb->rxq->
							rx_packets_with_error;
					buf[bi++] = rcb->rxq->
							rxbuf_alloc_failed;
					buf[bi++] = rcb->producer_index;
					buf[bi++] = rcb->consumer_index;
				}
			}
	}

	for (i = 0; i < bnad->num_tx; i++) {
		if (!bnad->tx_info[i].tx)
			continue;
		for (j = 0; j < bnad->num_txq_per_tx; j++)
			if (bnad->tx_info[i].tcb[j] &&
			    bnad->tx_info[i].tcb[j]->txq) {
				tcb = bnad->tx_info[i].tcb[j];
				buf[bi++] = tcb->txq->tx_packets;
				buf[bi++] = tcb->txq->tx_bytes;
				buf[bi++] = tcb->producer_index;
				buf[bi++] = tcb->consumer_index;
				buf[bi++] = *(tcb->hw_consumer_index);
			}
	}

	return bi;
}
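
/*
 * Fill the ethtool stats buffer: netdev counters first, then driver
 * counters, the per-port hardware counters, per-txf/rxf blocks for every
 * active function, and finally the per-queue counters.
 */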
static void
bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
		       u64 *buf)
{
	struct bnad *bnad = netdev_priv(netdev);
	int i, j, bi = 0;
	unsigned long flags;
	struct rtnl_link_stats64 *net_stats64;
	u64 *stats64;
	u32 bmap;

	mutex_lock(&bnad->conf_mutex);
	if (bnad_get_stats_count_locked(netdev) != stats->n_stats) {
		mutex_unlock(&bnad->conf_mutex);
		return;
	}

	/*
	 * bna_lock is used to synchronize reads from bna_stats, which is
	 * written under the same lock.
	 */
	spin_lock_irqsave(&bnad->bna_lock, flags);

	memset(buf, 0, stats->n_stats * sizeof(u64));

	net_stats64 = (struct rtnl_link_stats64 *)buf;
	bnad_netdev_qstats_fill(bnad, net_stats64);
	bnad_netdev_hwstats_fill(bnad, net_stats64);

	bi = sizeof(*net_stats64) / sizeof(u64);

	/* Get netif_queue_stopped from stack */
	bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev);

	/* Fill driver stats into ethtool buffers */
	stats64 = (u64 *)&bnad->stats.drv_stats;
	for (i = 0; i < sizeof(struct bnad_drv_stats) / sizeof(u64); i++)
		buf[bi++] = stats64[i];

	/* Fill hardware stats excluding the rxf/txf into ethtool bufs */
	stats64 = (u64 *)&bnad->stats.bna_stats->hw_stats;
	for (i = 0;
	     i < offsetof(struct bfi_enet_stats, rxf_stats[0]) / sizeof(u64);
	     i++)
		buf[bi++] = stats64[i];

	/* Fill txf stats into ethtool buffers */
	bmap = bna_tx_rid_mask(&bnad->bna);
	for (i = 0; bmap; i++) {
		if (bmap & 1) {
			stats64 = (u64 *)&bnad->stats.bna_stats->
						hw_stats.txf_stats[i];
			for (j = 0; j < sizeof(struct bfi_enet_stats_txf) /
					sizeof(u64); j++)
				buf[bi++] = stats64[j];
		}
		bmap >>= 1;
	}

	/* Fill rxf stats into ethtool buffers */
	bmap = bna_rx_rid_mask(&bnad->bna);
	for (i = 0; bmap; i++) {
		if (bmap & 1) {
			stats64 = (u64 *)&bnad->stats.bna_stats->
						hw_stats.rxf_stats[i];
			for (j = 0; j < sizeof(struct bfi_enet_stats_rxf) /
					sizeof(u64); j++)
				buf[bi++] = stats64[j];
		}
		bmap >>= 1;
	}

	/* Fill per Q stats into ethtool buffers */
	bi = bnad_per_q_stats_fill(bnad, buf, bi);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);
}
static int
bnad_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return bnad_get_stats_count_locked(netdev);
	default:
		return -EOPNOTSUPP;
	}
}
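
/*
 * Query the flash attributes and map a byte offset to the flash partition
 * that contains it; returns the partition type (0 on failure) and the
 * partition base offset through *base_offset.
 */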
static u32
bnad_get_flash_partition_by_offset(struct bnad *bnad, u32 offset,
				   u32 *base_offset)
{
	struct bfa_flash_attr *flash_attr;
	struct bnad_iocmd_comp fcomp;
	u32 i, flash_part = 0, ret;
	unsigned long flags = 0;

	flash_attr = kzalloc(sizeof(struct bfa_flash_attr), GFP_KERNEL);
	if (!flash_attr)
		return 0;

	fcomp.bnad = bnad;
	fcomp.comp_status = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	ret = bfa_nw_flash_get_attr(&bnad->bna.flash, flash_attr,
				    bnad_cb_completion, &fcomp);
	if (ret != BFA_STATUS_OK) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		kfree(flash_attr);
		return 0;
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	wait_for_completion(&fcomp.comp);
	ret = fcomp.comp_status;

	/* Check for the flash type & base offset value */
	if (ret == BFA_STATUS_OK) {
		for (i = 0; i < flash_attr->npart; i++) {
			if (offset >= flash_attr->part[i].part_off &&
			    offset < (flash_attr->part[i].part_off +
				      flash_attr->part[i].part_size)) {
				flash_part = flash_attr->part[i].part_type;
				*base_offset = flash_attr->part[i].part_off;
				break;
			}
		}
	}

	kfree(flash_attr);
	return flash_part;
}
static int
bnad_get_eeprom_len(struct net_device *netdev)
{
	return BFA_TOTAL_FLASH_SIZE;
}
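
/*
 * EEPROM read/write requests are translated into flash partition reads and
 * updates relative to the partition that contains eeprom->offset.
 */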
static int
bnad_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
		u8 *bytes)
{
	struct bnad *bnad = netdev_priv(netdev);
	struct bnad_iocmd_comp fcomp;
	u32 flash_part = 0, base_offset = 0;
	unsigned long flags = 0;
	int ret = 0;

	/* Fill the magic value */
	eeprom->magic = bnad->pcidev->vendor | (bnad->pcidev->device << 16);

	/* Query the flash partition based on the offset */
	flash_part = bnad_get_flash_partition_by_offset(bnad,
				eeprom->offset, &base_offset);
	if (flash_part == 0)
		return -EFAULT;

	fcomp.bnad = bnad;
	fcomp.comp_status = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	ret = bfa_nw_flash_read_part(&bnad->bna.flash, flash_part,
				     bnad->id, bytes, eeprom->len,
				     eeprom->offset - base_offset,
				     bnad_cb_completion, &fcomp);
	if (ret != BFA_STATUS_OK) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		goto done;
	}

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	wait_for_completion(&fcomp.comp);
	ret = fcomp.comp_status;
done:
	return ret;
}
static int
bnad_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
		u8 *bytes)
{
	struct bnad *bnad = netdev_priv(netdev);
	struct bnad_iocmd_comp fcomp;
	u32 flash_part = 0, base_offset = 0;
	unsigned long flags = 0;
	int ret = 0;

	/* Check if the flash update request is valid */
	if (eeprom->magic != (bnad->pcidev->vendor |
			      (bnad->pcidev->device << 16)))
		return -EINVAL;

	/* Query the flash partition based on the offset */
	flash_part = bnad_get_flash_partition_by_offset(bnad,
				eeprom->offset, &base_offset);
	if (flash_part == 0)
		return -EFAULT;

	fcomp.bnad = bnad;
	fcomp.comp_status = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	ret = bfa_nw_flash_update_part(&bnad->bna.flash, flash_part,
				       bnad->id, bytes, eeprom->len,
				       eeprom->offset - base_offset,
				       bnad_cb_completion, &fcomp);
	if (ret != BFA_STATUS_OK) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		goto done;
	}

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	wait_for_completion(&fcomp.comp);
	ret = fcomp.comp_status;
done:
	return ret;
}
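
/*
 * Flash a firmware image requested via ethtool: load it with
 * request_firmware() and write it to the FWIMG flash partition.
 */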
static int
bnad_flash_device(struct net_device *netdev, struct ethtool_flash *eflash)
{
	struct bnad *bnad = netdev_priv(netdev);
	struct bnad_iocmd_comp fcomp;
	const struct firmware *fw;
	int ret = 0;

	ret = request_firmware(&fw, eflash->data, &bnad->pcidev->dev);
	if (ret) {
		netdev_err(netdev, "can't load firmware %s\n", eflash->data);
		goto out;
	}

	fcomp.bnad = bnad;
	fcomp.comp_status = 0;

	init_completion(&fcomp.comp);
	spin_lock_irq(&bnad->bna_lock);
	ret = bfa_nw_flash_update_part(&bnad->bna.flash, BFA_FLASH_PART_FWIMG,
				       bnad->id, (u8 *)fw->data, fw->size, 0,
				       bnad_cb_completion, &fcomp);
	if (ret != BFA_STATUS_OK) {
		netdev_warn(netdev, "flash update failed with err=%d\n", ret);
		ret = -EIO;
		spin_unlock_irq(&bnad->bna_lock);
		goto out;
	}

	spin_unlock_irq(&bnad->bna_lock);
	wait_for_completion(&fcomp.comp);
	if (fcomp.comp_status != BFA_STATUS_OK) {
		ret = -EIO;
		netdev_warn(netdev,
			    "firmware image update failed with err=%d\n",
			    fcomp.comp_status);
	}
out:
	release_firmware(fw);
	return ret;
}
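
/*
 * These callbacks back the standard ethtool interface, e.g. (with "ethX"
 * as a placeholder interface name):
 *   ethtool -i ethX       -> bnad_get_drvinfo
 *   ethtool -S ethX       -> bnad_get_strings / bnad_get_ethtool_stats
 *   ethtool -c/-C ethX    -> bnad_get_coalesce / bnad_set_coalesce
 *   ethtool -g/-G ethX    -> bnad_get_ringparam / bnad_set_ringparam
 *   ethtool -a/-A ethX    -> bnad_get_pauseparam / bnad_set_pauseparam
 *   ethtool -f ethX <fw>  -> bnad_flash_device
 */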
static const struct ethtool_ops bnad_ethtool_ops = {
	.get_settings = bnad_get_settings,
	.set_settings = bnad_set_settings,
	.get_drvinfo = bnad_get_drvinfo,
	.get_wol = bnad_get_wol,
	.get_link = ethtool_op_get_link,
	.get_coalesce = bnad_get_coalesce,
	.set_coalesce = bnad_set_coalesce,
	.get_ringparam = bnad_get_ringparam,
	.set_ringparam = bnad_set_ringparam,
	.get_pauseparam = bnad_get_pauseparam,
	.set_pauseparam = bnad_set_pauseparam,
	.get_strings = bnad_get_strings,
	.get_ethtool_stats = bnad_get_ethtool_stats,
	.get_sset_count = bnad_get_sset_count,
	.get_eeprom_len = bnad_get_eeprom_len,
	.get_eeprom = bnad_get_eeprom,
	.set_eeprom = bnad_set_eeprom,
	.flash_device = bnad_flash_device,
	.get_ts_info = ethtool_op_get_ts_info,
};
void
bnad_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &bnad_ethtool_ops;
}