2 * Linux network driver for Brocade Converged Network Adapter.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
18 #include <linux/bitops.h>
19 #include <linux/netdevice.h>
20 #include <linux/skbuff.h>
21 #include <linux/etherdevice.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_vlan.h>
25 #include <linux/if_ether.h>
27 #include <linux/prefetch.h>
28 #include <linux/module.h>
34 static DEFINE_MUTEX(bnad_fwimg_mutex
);
39 static uint bnad_msix_disable
;
40 module_param(bnad_msix_disable
, uint
, 0444);
41 MODULE_PARM_DESC(bnad_msix_disable
, "Disable MSIX mode");
43 static uint bnad_ioc_auto_recover
= 1;
44 module_param(bnad_ioc_auto_recover
, uint
, 0444);
45 MODULE_PARM_DESC(bnad_ioc_auto_recover
, "Enable / Disable auto recovery");
47 static uint bna_debugfs_enable
= 1;
48 module_param(bna_debugfs_enable
, uint
, S_IRUGO
| S_IWUSR
);
49 MODULE_PARM_DESC(bna_debugfs_enable
, "Enables debugfs feature, default=1,"
50 " Range[false:0|true:1]");
55 static u32 bnad_rxqs_per_cq
= 2;
57 static struct mutex bnad_list_mutex
;
58 static LIST_HEAD(bnad_list
);
59 static const u8 bnad_bcast_addr
[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
64 #define BNAD_GET_MBOX_IRQ(_bnad) \
65 (((_bnad)->cfg_flags & BNAD_CF_MSIX) ? \
66 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
67 ((_bnad)->pcidev->irq))
69 #define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size) \
71 (_res_info)->res_type = BNA_RES_T_MEM; \
72 (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA; \
73 (_res_info)->res_u.mem_info.num = (_num); \
74 (_res_info)->res_u.mem_info.len = (_size); \
78 bnad_add_to_list(struct bnad
*bnad
)
80 mutex_lock(&bnad_list_mutex
);
81 list_add_tail(&bnad
->list_entry
, &bnad_list
);
83 mutex_unlock(&bnad_list_mutex
);
87 bnad_remove_from_list(struct bnad
*bnad
)
89 mutex_lock(&bnad_list_mutex
);
90 list_del(&bnad
->list_entry
);
91 mutex_unlock(&bnad_list_mutex
);
95 * Reinitialize completions in CQ, once Rx is taken down
98 bnad_cq_cleanup(struct bnad
*bnad
, struct bna_ccb
*ccb
)
100 struct bna_cq_entry
*cmpl
;
103 for (i
= 0; i
< ccb
->q_depth
; i
++) {
104 cmpl
= &((struct bna_cq_entry
*)ccb
->sw_q
)[i
];
109 /* Tx Datapath functions */
112 /* Caller should ensure that the entry at unmap_q[index] is valid */
114 bnad_tx_buff_unmap(struct bnad
*bnad
,
115 struct bnad_tx_unmap
*unmap_q
,
116 u32 q_depth
, u32 index
)
118 struct bnad_tx_unmap
*unmap
;
122 unmap
= &unmap_q
[index
];
123 nvecs
= unmap
->nvecs
;
128 dma_unmap_single(&bnad
->pcidev
->dev
,
129 dma_unmap_addr(&unmap
->vectors
[0], dma_addr
),
130 skb_headlen(skb
), DMA_TO_DEVICE
);
131 dma_unmap_addr_set(&unmap
->vectors
[0], dma_addr
, 0);
137 if (vector
== BFI_TX_MAX_VECTORS_PER_WI
) {
139 BNA_QE_INDX_INC(index
, q_depth
);
140 unmap
= &unmap_q
[index
];
143 dma_unmap_page(&bnad
->pcidev
->dev
,
144 dma_unmap_addr(&unmap
->vectors
[vector
], dma_addr
),
145 dma_unmap_len(&unmap
->vectors
[vector
], dma_len
),
147 dma_unmap_addr_set(&unmap
->vectors
[vector
], dma_addr
, 0);
151 BNA_QE_INDX_INC(index
, q_depth
);
157 * Frees all pending Tx Bufs
158 * At this point no activity is expected on the Q,
159 * so DMA unmap & freeing is fine.
162 bnad_txq_cleanup(struct bnad
*bnad
, struct bna_tcb
*tcb
)
164 struct bnad_tx_unmap
*unmap_q
= tcb
->unmap_q
;
168 for (i
= 0; i
< tcb
->q_depth
; i
++) {
169 skb
= unmap_q
[i
].skb
;
172 bnad_tx_buff_unmap(bnad
, unmap_q
, tcb
->q_depth
, i
);
174 dev_kfree_skb_any(skb
);
179 * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
180 * Can be called in a) Interrupt context
184 bnad_txcmpl_process(struct bnad
*bnad
, struct bna_tcb
*tcb
)
186 u32 sent_packets
= 0, sent_bytes
= 0;
187 u32 wis
, unmap_wis
, hw_cons
, cons
, q_depth
;
188 struct bnad_tx_unmap
*unmap_q
= tcb
->unmap_q
;
189 struct bnad_tx_unmap
*unmap
;
192 /* Just return if TX is stopped */
193 if (!test_bit(BNAD_TXQ_TX_STARTED
, &tcb
->flags
))
196 hw_cons
= *(tcb
->hw_consumer_index
);
197 cons
= tcb
->consumer_index
;
198 q_depth
= tcb
->q_depth
;
200 wis
= BNA_Q_INDEX_CHANGE(cons
, hw_cons
, q_depth
);
201 BUG_ON(!(wis
<= BNA_QE_IN_USE_CNT(tcb
, tcb
->q_depth
)));
204 unmap
= &unmap_q
[cons
];
209 sent_bytes
+= skb
->len
;
211 unmap_wis
= BNA_TXQ_WI_NEEDED(unmap
->nvecs
);
214 cons
= bnad_tx_buff_unmap(bnad
, unmap_q
, q_depth
, cons
);
215 dev_kfree_skb_any(skb
);
218 /* Update consumer pointers. */
219 tcb
->consumer_index
= hw_cons
;
221 tcb
->txq
->tx_packets
+= sent_packets
;
222 tcb
->txq
->tx_bytes
+= sent_bytes
;
228 bnad_tx_complete(struct bnad
*bnad
, struct bna_tcb
*tcb
)
230 struct net_device
*netdev
= bnad
->netdev
;
233 if (test_and_set_bit(BNAD_TXQ_FREE_SENT
, &tcb
->flags
))
236 sent
= bnad_txcmpl_process(bnad
, tcb
);
238 if (netif_queue_stopped(netdev
) &&
239 netif_carrier_ok(netdev
) &&
240 BNA_QE_FREE_CNT(tcb
, tcb
->q_depth
) >=
241 BNAD_NETIF_WAKE_THRESHOLD
) {
242 if (test_bit(BNAD_TXQ_TX_STARTED
, &tcb
->flags
)) {
243 netif_wake_queue(netdev
);
244 BNAD_UPDATE_CTR(bnad
, netif_queue_wakeup
);
249 if (likely(test_bit(BNAD_TXQ_TX_STARTED
, &tcb
->flags
)))
250 bna_ib_ack(tcb
->i_dbell
, sent
);
252 smp_mb__before_atomic();
253 clear_bit(BNAD_TXQ_FREE_SENT
, &tcb
->flags
);
258 /* MSIX Tx Completion Handler */
260 bnad_msix_tx(int irq
, void *data
)
262 struct bna_tcb
*tcb
= (struct bna_tcb
*)data
;
263 struct bnad
*bnad
= tcb
->bnad
;
265 bnad_tx_complete(bnad
, tcb
);
271 bnad_rxq_alloc_uninit(struct bnad
*bnad
, struct bna_rcb
*rcb
)
273 struct bnad_rx_unmap_q
*unmap_q
= rcb
->unmap_q
;
275 unmap_q
->reuse_pi
= -1;
276 unmap_q
->alloc_order
= -1;
277 unmap_q
->map_size
= 0;
278 unmap_q
->type
= BNAD_RXBUF_NONE
;
281 /* Default is page-based allocation. Multi-buffer support - TBD */
283 bnad_rxq_alloc_init(struct bnad
*bnad
, struct bna_rcb
*rcb
)
285 struct bnad_rx_unmap_q
*unmap_q
= rcb
->unmap_q
;
288 bnad_rxq_alloc_uninit(bnad
, rcb
);
290 order
= get_order(rcb
->rxq
->buffer_size
);
292 unmap_q
->type
= BNAD_RXBUF_PAGE
;
294 if (bna_is_small_rxq(rcb
->id
)) {
295 unmap_q
->alloc_order
= 0;
296 unmap_q
->map_size
= rcb
->rxq
->buffer_size
;
298 if (rcb
->rxq
->multi_buffer
) {
299 unmap_q
->alloc_order
= 0;
300 unmap_q
->map_size
= rcb
->rxq
->buffer_size
;
301 unmap_q
->type
= BNAD_RXBUF_MULTI_BUFF
;
303 unmap_q
->alloc_order
= order
;
305 (rcb
->rxq
->buffer_size
> 2048) ?
306 PAGE_SIZE
<< order
: 2048;
310 BUG_ON(((PAGE_SIZE
<< order
) % unmap_q
->map_size
));
316 bnad_rxq_cleanup_page(struct bnad
*bnad
, struct bnad_rx_unmap
*unmap
)
321 dma_unmap_page(&bnad
->pcidev
->dev
,
322 dma_unmap_addr(&unmap
->vector
, dma_addr
),
323 unmap
->vector
.len
, DMA_FROM_DEVICE
);
324 put_page(unmap
->page
);
326 dma_unmap_addr_set(&unmap
->vector
, dma_addr
, 0);
327 unmap
->vector
.len
= 0;
331 bnad_rxq_cleanup_skb(struct bnad
*bnad
, struct bnad_rx_unmap
*unmap
)
336 dma_unmap_single(&bnad
->pcidev
->dev
,
337 dma_unmap_addr(&unmap
->vector
, dma_addr
),
338 unmap
->vector
.len
, DMA_FROM_DEVICE
);
339 dev_kfree_skb_any(unmap
->skb
);
341 dma_unmap_addr_set(&unmap
->vector
, dma_addr
, 0);
342 unmap
->vector
.len
= 0;
346 bnad_rxq_cleanup(struct bnad
*bnad
, struct bna_rcb
*rcb
)
348 struct bnad_rx_unmap_q
*unmap_q
= rcb
->unmap_q
;
351 for (i
= 0; i
< rcb
->q_depth
; i
++) {
352 struct bnad_rx_unmap
*unmap
= &unmap_q
->unmap
[i
];
354 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q
->type
))
355 bnad_rxq_cleanup_skb(bnad
, unmap
);
357 bnad_rxq_cleanup_page(bnad
, unmap
);
359 bnad_rxq_alloc_uninit(bnad
, rcb
);
363 bnad_rxq_refill_page(struct bnad
*bnad
, struct bna_rcb
*rcb
, u32 nalloc
)
365 u32 alloced
, prod
, q_depth
;
366 struct bnad_rx_unmap_q
*unmap_q
= rcb
->unmap_q
;
367 struct bnad_rx_unmap
*unmap
, *prev
;
368 struct bna_rxq_entry
*rxent
;
370 u32 page_offset
, alloc_size
;
373 prod
= rcb
->producer_index
;
374 q_depth
= rcb
->q_depth
;
376 alloc_size
= PAGE_SIZE
<< unmap_q
->alloc_order
;
380 unmap
= &unmap_q
->unmap
[prod
];
382 if (unmap_q
->reuse_pi
< 0) {
383 page
= alloc_pages(GFP_ATOMIC
| __GFP_COMP
,
384 unmap_q
->alloc_order
);
387 prev
= &unmap_q
->unmap
[unmap_q
->reuse_pi
];
389 page_offset
= prev
->page_offset
+ unmap_q
->map_size
;
393 if (unlikely(!page
)) {
394 BNAD_UPDATE_CTR(bnad
, rxbuf_alloc_failed
);
395 rcb
->rxq
->rxbuf_alloc_failed
++;
399 dma_addr
= dma_map_page(&bnad
->pcidev
->dev
, page
, page_offset
,
400 unmap_q
->map_size
, DMA_FROM_DEVICE
);
403 unmap
->page_offset
= page_offset
;
404 dma_unmap_addr_set(&unmap
->vector
, dma_addr
, dma_addr
);
405 unmap
->vector
.len
= unmap_q
->map_size
;
406 page_offset
+= unmap_q
->map_size
;
408 if (page_offset
< alloc_size
)
409 unmap_q
->reuse_pi
= prod
;
411 unmap_q
->reuse_pi
= -1;
413 rxent
= &((struct bna_rxq_entry
*)rcb
->sw_q
)[prod
];
414 BNA_SET_DMA_ADDR(dma_addr
, &rxent
->host_addr
);
415 BNA_QE_INDX_INC(prod
, q_depth
);
420 if (likely(alloced
)) {
421 rcb
->producer_index
= prod
;
423 if (likely(test_bit(BNAD_RXQ_POST_OK
, &rcb
->flags
)))
424 bna_rxq_prod_indx_doorbell(rcb
);
431 bnad_rxq_refill_skb(struct bnad
*bnad
, struct bna_rcb
*rcb
, u32 nalloc
)
433 u32 alloced
, prod
, q_depth
, buff_sz
;
434 struct bnad_rx_unmap_q
*unmap_q
= rcb
->unmap_q
;
435 struct bnad_rx_unmap
*unmap
;
436 struct bna_rxq_entry
*rxent
;
440 buff_sz
= rcb
->rxq
->buffer_size
;
441 prod
= rcb
->producer_index
;
442 q_depth
= rcb
->q_depth
;
446 unmap
= &unmap_q
->unmap
[prod
];
448 skb
= netdev_alloc_skb_ip_align(bnad
->netdev
, buff_sz
);
450 if (unlikely(!skb
)) {
451 BNAD_UPDATE_CTR(bnad
, rxbuf_alloc_failed
);
452 rcb
->rxq
->rxbuf_alloc_failed
++;
455 dma_addr
= dma_map_single(&bnad
->pcidev
->dev
, skb
->data
,
456 buff_sz
, DMA_FROM_DEVICE
);
459 dma_unmap_addr_set(&unmap
->vector
, dma_addr
, dma_addr
);
460 unmap
->vector
.len
= buff_sz
;
462 rxent
= &((struct bna_rxq_entry
*)rcb
->sw_q
)[prod
];
463 BNA_SET_DMA_ADDR(dma_addr
, &rxent
->host_addr
);
464 BNA_QE_INDX_INC(prod
, q_depth
);
469 if (likely(alloced
)) {
470 rcb
->producer_index
= prod
;
472 if (likely(test_bit(BNAD_RXQ_POST_OK
, &rcb
->flags
)))
473 bna_rxq_prod_indx_doorbell(rcb
);
480 bnad_rxq_post(struct bnad
*bnad
, struct bna_rcb
*rcb
)
482 struct bnad_rx_unmap_q
*unmap_q
= rcb
->unmap_q
;
485 to_alloc
= BNA_QE_FREE_CNT(rcb
, rcb
->q_depth
);
486 if (!(to_alloc
>> BNAD_RXQ_REFILL_THRESHOLD_SHIFT
))
489 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q
->type
))
490 bnad_rxq_refill_skb(bnad
, rcb
, to_alloc
);
492 bnad_rxq_refill_page(bnad
, rcb
, to_alloc
);
495 #define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
497 BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \
498 BNA_CQ_EF_L4_CKSUM_OK)
500 #define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
501 BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
502 #define flags_tcp6 (BNA_CQ_EF_IPV6 | \
503 BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
504 #define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
505 BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
506 #define flags_udp6 (BNA_CQ_EF_IPV6 | \
507 BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
510 bnad_cq_drop_packet(struct bnad
*bnad
, struct bna_rcb
*rcb
,
511 u32 sop_ci
, u32 nvecs
)
513 struct bnad_rx_unmap_q
*unmap_q
;
514 struct bnad_rx_unmap
*unmap
;
517 unmap_q
= rcb
->unmap_q
;
518 for (vec
= 0, ci
= sop_ci
; vec
< nvecs
; vec
++) {
519 unmap
= &unmap_q
->unmap
[ci
];
520 BNA_QE_INDX_INC(ci
, rcb
->q_depth
);
522 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q
->type
))
523 bnad_rxq_cleanup_skb(bnad
, unmap
);
525 bnad_rxq_cleanup_page(bnad
, unmap
);
530 bnad_cq_setup_skb_frags(struct bna_rcb
*rcb
, struct sk_buff
*skb
,
531 u32 sop_ci
, u32 nvecs
, u32 last_fraglen
)
534 u32 ci
, vec
, len
, totlen
= 0;
535 struct bnad_rx_unmap_q
*unmap_q
;
536 struct bnad_rx_unmap
*unmap
;
538 unmap_q
= rcb
->unmap_q
;
541 /* prefetch header */
542 prefetch(page_address(unmap_q
->unmap
[sop_ci
].page
) +
543 unmap_q
->unmap
[sop_ci
].page_offset
);
545 for (vec
= 1, ci
= sop_ci
; vec
<= nvecs
; vec
++) {
546 unmap
= &unmap_q
->unmap
[ci
];
547 BNA_QE_INDX_INC(ci
, rcb
->q_depth
);
549 dma_unmap_page(&bnad
->pcidev
->dev
,
550 dma_unmap_addr(&unmap
->vector
, dma_addr
),
551 unmap
->vector
.len
, DMA_FROM_DEVICE
);
553 len
= (vec
== nvecs
) ?
554 last_fraglen
: unmap
->vector
.len
;
557 skb_fill_page_desc(skb
, skb_shinfo(skb
)->nr_frags
,
558 unmap
->page
, unmap
->page_offset
, len
);
561 unmap
->vector
.len
= 0;
565 skb
->data_len
+= totlen
;
566 skb
->truesize
+= totlen
;
570 bnad_cq_setup_skb(struct bnad
*bnad
, struct sk_buff
*skb
,
571 struct bnad_rx_unmap
*unmap
, u32 len
)
575 dma_unmap_single(&bnad
->pcidev
->dev
,
576 dma_unmap_addr(&unmap
->vector
, dma_addr
),
577 unmap
->vector
.len
, DMA_FROM_DEVICE
);
580 skb
->protocol
= eth_type_trans(skb
, bnad
->netdev
);
583 unmap
->vector
.len
= 0;
587 bnad_cq_process(struct bnad
*bnad
, struct bna_ccb
*ccb
, int budget
)
589 struct bna_cq_entry
*cq
, *cmpl
, *next_cmpl
;
590 struct bna_rcb
*rcb
= NULL
;
591 struct bnad_rx_unmap_q
*unmap_q
;
592 struct bnad_rx_unmap
*unmap
= NULL
;
593 struct sk_buff
*skb
= NULL
;
594 struct bna_pkt_rate
*pkt_rt
= &ccb
->pkt_rate
;
595 struct bnad_rx_ctrl
*rx_ctrl
= ccb
->ctrl
;
596 u32 packets
= 0, len
= 0, totlen
= 0;
597 u32 pi
, vec
, sop_ci
= 0, nvecs
= 0;
598 u32 flags
, masked_flags
;
600 prefetch(bnad
->netdev
);
604 while (packets
< budget
) {
605 cmpl
= &cq
[ccb
->producer_index
];
608 /* The 'valid' field is set by the adapter, only after writing
609 * the other fields of completion entry. Hence, do not load
610 * other fields of completion entry *before* the 'valid' is
611 * loaded. Adding the rmb() here prevents the compiler and/or
612 * CPU from reordering the reads which would potentially result
613 * in reading stale values in completion entry.
617 BNA_UPDATE_PKT_CNT(pkt_rt
, ntohs(cmpl
->length
));
619 if (bna_is_small_rxq(cmpl
->rxq_id
))
624 unmap_q
= rcb
->unmap_q
;
626 /* start of packet ci */
627 sop_ci
= rcb
->consumer_index
;
629 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q
->type
)) {
630 unmap
= &unmap_q
->unmap
[sop_ci
];
633 skb
= napi_get_frags(&rx_ctrl
->napi
);
639 flags
= ntohl(cmpl
->flags
);
640 len
= ntohs(cmpl
->length
);
644 /* Check all the completions for this frame.
645 * busy-wait doesn't help much, break here.
647 if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q
->type
) &&
648 (flags
& BNA_CQ_EF_EOP
) == 0) {
649 pi
= ccb
->producer_index
;
651 BNA_QE_INDX_INC(pi
, ccb
->q_depth
);
654 if (!next_cmpl
->valid
)
656 /* The 'valid' field is set by the adapter, only
657 * after writing the other fields of completion
658 * entry. Hence, do not load other fields of
659 * completion entry *before* the 'valid' is
660 * loaded. Adding the rmb() here prevents the
661 * compiler and/or CPU from reordering the reads
662 * which would potentially result in reading
663 * stale values in completion entry.
667 len
= ntohs(next_cmpl
->length
);
668 flags
= ntohl(next_cmpl
->flags
);
672 } while ((flags
& BNA_CQ_EF_EOP
) == 0);
674 if (!next_cmpl
->valid
)
678 /* TODO: BNA_CQ_EF_LOCAL ? */
679 if (unlikely(flags
& (BNA_CQ_EF_MAC_ERROR
|
680 BNA_CQ_EF_FCS_ERROR
|
681 BNA_CQ_EF_TOO_LONG
))) {
682 bnad_cq_drop_packet(bnad
, rcb
, sop_ci
, nvecs
);
683 rcb
->rxq
->rx_packets_with_error
++;
688 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q
->type
))
689 bnad_cq_setup_skb(bnad
, skb
, unmap
, len
);
691 bnad_cq_setup_skb_frags(rcb
, skb
, sop_ci
, nvecs
, len
);
694 rcb
->rxq
->rx_packets
++;
695 rcb
->rxq
->rx_bytes
+= totlen
;
696 ccb
->bytes_per_intr
+= totlen
;
698 masked_flags
= flags
& flags_cksum_prot_mask
;
701 ((bnad
->netdev
->features
& NETIF_F_RXCSUM
) &&
702 ((masked_flags
== flags_tcp4
) ||
703 (masked_flags
== flags_udp4
) ||
704 (masked_flags
== flags_tcp6
) ||
705 (masked_flags
== flags_udp6
))))
706 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
708 skb_checksum_none_assert(skb
);
710 if ((flags
& BNA_CQ_EF_VLAN
) &&
711 (bnad
->netdev
->features
& NETIF_F_HW_VLAN_CTAG_RX
))
712 __vlan_hwaccel_put_tag(skb
, htons(ETH_P_8021Q
), ntohs(cmpl
->vlan_tag
));
714 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q
->type
))
715 netif_receive_skb(skb
);
717 napi_gro_frags(&rx_ctrl
->napi
);
720 BNA_QE_INDX_ADD(rcb
->consumer_index
, nvecs
, rcb
->q_depth
);
721 for (vec
= 0; vec
< nvecs
; vec
++) {
722 cmpl
= &cq
[ccb
->producer_index
];
724 BNA_QE_INDX_INC(ccb
->producer_index
, ccb
->q_depth
);
726 cmpl
= &cq
[ccb
->producer_index
];
729 napi_gro_flush(&rx_ctrl
->napi
, false);
730 if (likely(test_bit(BNAD_RXQ_STARTED
, &ccb
->rcb
[0]->flags
)))
731 bna_ib_ack_disable_irq(ccb
->i_dbell
, packets
);
733 bnad_rxq_post(bnad
, ccb
->rcb
[0]);
735 bnad_rxq_post(bnad
, ccb
->rcb
[1]);
741 bnad_netif_rx_schedule_poll(struct bnad
*bnad
, struct bna_ccb
*ccb
)
743 struct bnad_rx_ctrl
*rx_ctrl
= (struct bnad_rx_ctrl
*)(ccb
->ctrl
);
744 struct napi_struct
*napi
= &rx_ctrl
->napi
;
746 if (likely(napi_schedule_prep(napi
))) {
747 __napi_schedule(napi
);
748 rx_ctrl
->rx_schedule
++;
752 /* MSIX Rx Path Handler */
754 bnad_msix_rx(int irq
, void *data
)
756 struct bna_ccb
*ccb
= (struct bna_ccb
*)data
;
759 ((struct bnad_rx_ctrl
*)(ccb
->ctrl
))->rx_intr_ctr
++;
760 bnad_netif_rx_schedule_poll(ccb
->bnad
, ccb
);
766 /* Interrupt handlers */
768 /* Mbox Interrupt Handlers */
770 bnad_msix_mbox_handler(int irq
, void *data
)
774 struct bnad
*bnad
= (struct bnad
*)data
;
776 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
777 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED
, &bnad
->run_flags
))) {
778 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
782 bna_intr_status_get(&bnad
->bna
, intr_status
);
784 if (BNA_IS_MBOX_ERR_INTR(&bnad
->bna
, intr_status
))
785 bna_mbox_handler(&bnad
->bna
, intr_status
);
787 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
793 bnad_isr(int irq
, void *data
)
798 struct bnad
*bnad
= (struct bnad
*)data
;
799 struct bnad_rx_info
*rx_info
;
800 struct bnad_rx_ctrl
*rx_ctrl
;
801 struct bna_tcb
*tcb
= NULL
;
803 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
804 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED
, &bnad
->run_flags
))) {
805 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
809 bna_intr_status_get(&bnad
->bna
, intr_status
);
811 if (unlikely(!intr_status
)) {
812 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
816 if (BNA_IS_MBOX_ERR_INTR(&bnad
->bna
, intr_status
))
817 bna_mbox_handler(&bnad
->bna
, intr_status
);
819 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
821 if (!BNA_IS_INTX_DATA_INTR(intr_status
))
824 /* Process data interrupts */
826 for (i
= 0; i
< bnad
->num_tx
; i
++) {
827 for (j
= 0; j
< bnad
->num_txq_per_tx
; j
++) {
828 tcb
= bnad
->tx_info
[i
].tcb
[j
];
829 if (tcb
&& test_bit(BNAD_TXQ_TX_STARTED
, &tcb
->flags
))
830 bnad_tx_complete(bnad
, bnad
->tx_info
[i
].tcb
[j
]);
834 for (i
= 0; i
< bnad
->num_rx
; i
++) {
835 rx_info
= &bnad
->rx_info
[i
];
838 for (j
= 0; j
< bnad
->num_rxp_per_rx
; j
++) {
839 rx_ctrl
= &rx_info
->rx_ctrl
[j
];
841 bnad_netif_rx_schedule_poll(bnad
,
849 * Called in interrupt / callback context
850 * with bna_lock held, so cfg_flags access is OK
853 bnad_enable_mbox_irq(struct bnad
*bnad
)
855 clear_bit(BNAD_RF_MBOX_IRQ_DISABLED
, &bnad
->run_flags
);
857 BNAD_UPDATE_CTR(bnad
, mbox_intr_enabled
);
861 * Called with bnad->bna_lock held b'cos of
862 * bnad->cfg_flags access.
865 bnad_disable_mbox_irq(struct bnad
*bnad
)
867 set_bit(BNAD_RF_MBOX_IRQ_DISABLED
, &bnad
->run_flags
);
869 BNAD_UPDATE_CTR(bnad
, mbox_intr_disabled
);
873 bnad_set_netdev_perm_addr(struct bnad
*bnad
)
875 struct net_device
*netdev
= bnad
->netdev
;
877 memcpy(netdev
->perm_addr
, &bnad
->perm_addr
, netdev
->addr_len
);
878 if (is_zero_ether_addr(netdev
->dev_addr
))
879 memcpy(netdev
->dev_addr
, &bnad
->perm_addr
, netdev
->addr_len
);
882 /* Control Path Handlers */
886 bnad_cb_mbox_intr_enable(struct bnad
*bnad
)
888 bnad_enable_mbox_irq(bnad
);
892 bnad_cb_mbox_intr_disable(struct bnad
*bnad
)
894 bnad_disable_mbox_irq(bnad
);
898 bnad_cb_ioceth_ready(struct bnad
*bnad
)
900 bnad
->bnad_completions
.ioc_comp_status
= BNA_CB_SUCCESS
;
901 complete(&bnad
->bnad_completions
.ioc_comp
);
905 bnad_cb_ioceth_failed(struct bnad
*bnad
)
907 bnad
->bnad_completions
.ioc_comp_status
= BNA_CB_FAIL
;
908 complete(&bnad
->bnad_completions
.ioc_comp
);
912 bnad_cb_ioceth_disabled(struct bnad
*bnad
)
914 bnad
->bnad_completions
.ioc_comp_status
= BNA_CB_SUCCESS
;
915 complete(&bnad
->bnad_completions
.ioc_comp
);
919 bnad_cb_enet_disabled(void *arg
)
921 struct bnad
*bnad
= (struct bnad
*)arg
;
923 netif_carrier_off(bnad
->netdev
);
924 complete(&bnad
->bnad_completions
.enet_comp
);
928 bnad_cb_ethport_link_status(struct bnad
*bnad
,
929 enum bna_link_status link_status
)
931 bool link_up
= false;
933 link_up
= (link_status
== BNA_LINK_UP
) || (link_status
== BNA_CEE_UP
);
935 if (link_status
== BNA_CEE_UP
) {
936 if (!test_bit(BNAD_RF_CEE_RUNNING
, &bnad
->run_flags
))
937 BNAD_UPDATE_CTR(bnad
, cee_toggle
);
938 set_bit(BNAD_RF_CEE_RUNNING
, &bnad
->run_flags
);
940 if (test_bit(BNAD_RF_CEE_RUNNING
, &bnad
->run_flags
))
941 BNAD_UPDATE_CTR(bnad
, cee_toggle
);
942 clear_bit(BNAD_RF_CEE_RUNNING
, &bnad
->run_flags
);
946 if (!netif_carrier_ok(bnad
->netdev
)) {
948 printk(KERN_WARNING
"bna: %s link up\n",
950 netif_carrier_on(bnad
->netdev
);
951 BNAD_UPDATE_CTR(bnad
, link_toggle
);
952 for (tx_id
= 0; tx_id
< bnad
->num_tx
; tx_id
++) {
953 for (tcb_id
= 0; tcb_id
< bnad
->num_txq_per_tx
;
955 struct bna_tcb
*tcb
=
956 bnad
->tx_info
[tx_id
].tcb
[tcb_id
];
963 if (test_bit(BNAD_TXQ_TX_STARTED
,
967 * Transmit Schedule */
968 printk(KERN_INFO
"bna: %s %d "
975 BNAD_UPDATE_CTR(bnad
,
981 BNAD_UPDATE_CTR(bnad
,
988 if (netif_carrier_ok(bnad
->netdev
)) {
989 printk(KERN_WARNING
"bna: %s link down\n",
991 netif_carrier_off(bnad
->netdev
);
992 BNAD_UPDATE_CTR(bnad
, link_toggle
);
998 bnad_cb_tx_disabled(void *arg
, struct bna_tx
*tx
)
1000 struct bnad
*bnad
= (struct bnad
*)arg
;
1002 complete(&bnad
->bnad_completions
.tx_comp
);
1006 bnad_cb_tcb_setup(struct bnad
*bnad
, struct bna_tcb
*tcb
)
1008 struct bnad_tx_info
*tx_info
=
1009 (struct bnad_tx_info
*)tcb
->txq
->tx
->priv
;
1012 tx_info
->tcb
[tcb
->id
] = tcb
;
1016 bnad_cb_tcb_destroy(struct bnad
*bnad
, struct bna_tcb
*tcb
)
1018 struct bnad_tx_info
*tx_info
=
1019 (struct bnad_tx_info
*)tcb
->txq
->tx
->priv
;
1021 tx_info
->tcb
[tcb
->id
] = NULL
;
1026 bnad_cb_ccb_setup(struct bnad
*bnad
, struct bna_ccb
*ccb
)
1028 struct bnad_rx_info
*rx_info
=
1029 (struct bnad_rx_info
*)ccb
->cq
->rx
->priv
;
1031 rx_info
->rx_ctrl
[ccb
->id
].ccb
= ccb
;
1032 ccb
->ctrl
= &rx_info
->rx_ctrl
[ccb
->id
];
1036 bnad_cb_ccb_destroy(struct bnad
*bnad
, struct bna_ccb
*ccb
)
1038 struct bnad_rx_info
*rx_info
=
1039 (struct bnad_rx_info
*)ccb
->cq
->rx
->priv
;
1041 rx_info
->rx_ctrl
[ccb
->id
].ccb
= NULL
;
1045 bnad_cb_tx_stall(struct bnad
*bnad
, struct bna_tx
*tx
)
1047 struct bnad_tx_info
*tx_info
=
1048 (struct bnad_tx_info
*)tx
->priv
;
1049 struct bna_tcb
*tcb
;
1053 for (i
= 0; i
< BNAD_MAX_TXQ_PER_TX
; i
++) {
1054 tcb
= tx_info
->tcb
[i
];
1058 clear_bit(BNAD_TXQ_TX_STARTED
, &tcb
->flags
);
1059 netif_stop_subqueue(bnad
->netdev
, txq_id
);
1060 printk(KERN_INFO
"bna: %s %d TXQ_STOPPED\n",
1061 bnad
->netdev
->name
, txq_id
);
1066 bnad_cb_tx_resume(struct bnad
*bnad
, struct bna_tx
*tx
)
1068 struct bnad_tx_info
*tx_info
= (struct bnad_tx_info
*)tx
->priv
;
1069 struct bna_tcb
*tcb
;
1073 for (i
= 0; i
< BNAD_MAX_TXQ_PER_TX
; i
++) {
1074 tcb
= tx_info
->tcb
[i
];
1079 BUG_ON(test_bit(BNAD_TXQ_TX_STARTED
, &tcb
->flags
));
1080 set_bit(BNAD_TXQ_TX_STARTED
, &tcb
->flags
);
1081 BUG_ON(*(tcb
->hw_consumer_index
) != 0);
1083 if (netif_carrier_ok(bnad
->netdev
)) {
1084 printk(KERN_INFO
"bna: %s %d TXQ_STARTED\n",
1085 bnad
->netdev
->name
, txq_id
);
1086 netif_wake_subqueue(bnad
->netdev
, txq_id
);
1087 BNAD_UPDATE_CTR(bnad
, netif_queue_wakeup
);
1092 * Workaround for first ioceth enable failure & we
1093 * get a 0 MAC address. We try to get the MAC address
1096 if (is_zero_ether_addr(&bnad
->perm_addr
.mac
[0])) {
1097 bna_enet_perm_mac_get(&bnad
->bna
.enet
, &bnad
->perm_addr
);
1098 bnad_set_netdev_perm_addr(bnad
);
1103 * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
1106 bnad_tx_cleanup(struct delayed_work
*work
)
1108 struct bnad_tx_info
*tx_info
=
1109 container_of(work
, struct bnad_tx_info
, tx_cleanup_work
);
1110 struct bnad
*bnad
= NULL
;
1111 struct bna_tcb
*tcb
;
1112 unsigned long flags
;
1115 for (i
= 0; i
< BNAD_MAX_TXQ_PER_TX
; i
++) {
1116 tcb
= tx_info
->tcb
[i
];
1122 if (test_and_set_bit(BNAD_TXQ_FREE_SENT
, &tcb
->flags
)) {
1127 bnad_txq_cleanup(bnad
, tcb
);
1129 smp_mb__before_atomic();
1130 clear_bit(BNAD_TXQ_FREE_SENT
, &tcb
->flags
);
1134 queue_delayed_work(bnad
->work_q
, &tx_info
->tx_cleanup_work
,
1135 msecs_to_jiffies(1));
1139 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1140 bna_tx_cleanup_complete(tx_info
->tx
);
1141 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1145 bnad_cb_tx_cleanup(struct bnad
*bnad
, struct bna_tx
*tx
)
1147 struct bnad_tx_info
*tx_info
= (struct bnad_tx_info
*)tx
->priv
;
1148 struct bna_tcb
*tcb
;
1151 for (i
= 0; i
< BNAD_MAX_TXQ_PER_TX
; i
++) {
1152 tcb
= tx_info
->tcb
[i
];
1157 queue_delayed_work(bnad
->work_q
, &tx_info
->tx_cleanup_work
, 0);
1161 bnad_cb_rx_stall(struct bnad
*bnad
, struct bna_rx
*rx
)
1163 struct bnad_rx_info
*rx_info
= (struct bnad_rx_info
*)rx
->priv
;
1164 struct bna_ccb
*ccb
;
1165 struct bnad_rx_ctrl
*rx_ctrl
;
1168 for (i
= 0; i
< BNAD_MAX_RXP_PER_RX
; i
++) {
1169 rx_ctrl
= &rx_info
->rx_ctrl
[i
];
1174 clear_bit(BNAD_RXQ_POST_OK
, &ccb
->rcb
[0]->flags
);
1177 clear_bit(BNAD_RXQ_POST_OK
, &ccb
->rcb
[1]->flags
);
1182 * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
1185 bnad_rx_cleanup(void *work
)
1187 struct bnad_rx_info
*rx_info
=
1188 container_of(work
, struct bnad_rx_info
, rx_cleanup_work
);
1189 struct bnad_rx_ctrl
*rx_ctrl
;
1190 struct bnad
*bnad
= NULL
;
1191 unsigned long flags
;
1194 for (i
= 0; i
< BNAD_MAX_RXP_PER_RX
; i
++) {
1195 rx_ctrl
= &rx_info
->rx_ctrl
[i
];
1200 bnad
= rx_ctrl
->ccb
->bnad
;
1203 * Wait till the poll handler has exited
1204 * and nothing can be scheduled anymore
1206 napi_disable(&rx_ctrl
->napi
);
1208 bnad_cq_cleanup(bnad
, rx_ctrl
->ccb
);
1209 bnad_rxq_cleanup(bnad
, rx_ctrl
->ccb
->rcb
[0]);
1210 if (rx_ctrl
->ccb
->rcb
[1])
1211 bnad_rxq_cleanup(bnad
, rx_ctrl
->ccb
->rcb
[1]);
1214 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1215 bna_rx_cleanup_complete(rx_info
->rx
);
1216 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1220 bnad_cb_rx_cleanup(struct bnad
*bnad
, struct bna_rx
*rx
)
1222 struct bnad_rx_info
*rx_info
= (struct bnad_rx_info
*)rx
->priv
;
1223 struct bna_ccb
*ccb
;
1224 struct bnad_rx_ctrl
*rx_ctrl
;
1227 for (i
= 0; i
< BNAD_MAX_RXP_PER_RX
; i
++) {
1228 rx_ctrl
= &rx_info
->rx_ctrl
[i
];
1233 clear_bit(BNAD_RXQ_STARTED
, &ccb
->rcb
[0]->flags
);
1236 clear_bit(BNAD_RXQ_STARTED
, &ccb
->rcb
[1]->flags
);
1239 queue_work(bnad
->work_q
, &rx_info
->rx_cleanup_work
);
1243 bnad_cb_rx_post(struct bnad
*bnad
, struct bna_rx
*rx
)
1245 struct bnad_rx_info
*rx_info
= (struct bnad_rx_info
*)rx
->priv
;
1246 struct bna_ccb
*ccb
;
1247 struct bna_rcb
*rcb
;
1248 struct bnad_rx_ctrl
*rx_ctrl
;
1251 for (i
= 0; i
< BNAD_MAX_RXP_PER_RX
; i
++) {
1252 rx_ctrl
= &rx_info
->rx_ctrl
[i
];
1257 napi_enable(&rx_ctrl
->napi
);
1259 for (j
= 0; j
< BNAD_MAX_RXQ_PER_RXP
; j
++) {
1264 bnad_rxq_alloc_init(bnad
, rcb
);
1265 set_bit(BNAD_RXQ_STARTED
, &rcb
->flags
);
1266 set_bit(BNAD_RXQ_POST_OK
, &rcb
->flags
);
1267 bnad_rxq_post(bnad
, rcb
);
1273 bnad_cb_rx_disabled(void *arg
, struct bna_rx
*rx
)
1275 struct bnad
*bnad
= (struct bnad
*)arg
;
1277 complete(&bnad
->bnad_completions
.rx_comp
);
1281 bnad_cb_rx_mcast_add(struct bnad
*bnad
, struct bna_rx
*rx
)
1283 bnad
->bnad_completions
.mcast_comp_status
= BNA_CB_SUCCESS
;
1284 complete(&bnad
->bnad_completions
.mcast_comp
);
1288 bnad_cb_stats_get(struct bnad
*bnad
, enum bna_cb_status status
,
1289 struct bna_stats
*stats
)
1291 if (status
== BNA_CB_SUCCESS
)
1292 BNAD_UPDATE_CTR(bnad
, hw_stats_updates
);
1294 if (!netif_running(bnad
->netdev
) ||
1295 !test_bit(BNAD_RF_STATS_TIMER_RUNNING
, &bnad
->run_flags
))
1298 mod_timer(&bnad
->stats_timer
,
1299 jiffies
+ msecs_to_jiffies(BNAD_STATS_TIMER_FREQ
));
1303 bnad_cb_enet_mtu_set(struct bnad
*bnad
)
1305 bnad
->bnad_completions
.mtu_comp_status
= BNA_CB_SUCCESS
;
1306 complete(&bnad
->bnad_completions
.mtu_comp
);
1310 bnad_cb_completion(void *arg
, enum bfa_status status
)
1312 struct bnad_iocmd_comp
*iocmd_comp
=
1313 (struct bnad_iocmd_comp
*)arg
;
1315 iocmd_comp
->comp_status
= (u32
) status
;
1316 complete(&iocmd_comp
->comp
);
1319 /* Resource allocation, free functions */
1322 bnad_mem_free(struct bnad
*bnad
,
1323 struct bna_mem_info
*mem_info
)
1328 if (mem_info
->mdl
== NULL
)
1331 for (i
= 0; i
< mem_info
->num
; i
++) {
1332 if (mem_info
->mdl
[i
].kva
!= NULL
) {
1333 if (mem_info
->mem_type
== BNA_MEM_T_DMA
) {
1334 BNA_GET_DMA_ADDR(&(mem_info
->mdl
[i
].dma
),
1336 dma_free_coherent(&bnad
->pcidev
->dev
,
1337 mem_info
->mdl
[i
].len
,
1338 mem_info
->mdl
[i
].kva
, dma_pa
);
1340 kfree(mem_info
->mdl
[i
].kva
);
1343 kfree(mem_info
->mdl
);
1344 mem_info
->mdl
= NULL
;
1348 bnad_mem_alloc(struct bnad
*bnad
,
1349 struct bna_mem_info
*mem_info
)
1354 if ((mem_info
->num
== 0) || (mem_info
->len
== 0)) {
1355 mem_info
->mdl
= NULL
;
1359 mem_info
->mdl
= kcalloc(mem_info
->num
, sizeof(struct bna_mem_descr
),
1361 if (mem_info
->mdl
== NULL
)
1364 if (mem_info
->mem_type
== BNA_MEM_T_DMA
) {
1365 for (i
= 0; i
< mem_info
->num
; i
++) {
1366 mem_info
->mdl
[i
].len
= mem_info
->len
;
1367 mem_info
->mdl
[i
].kva
=
1368 dma_alloc_coherent(&bnad
->pcidev
->dev
,
1369 mem_info
->len
, &dma_pa
,
1371 if (mem_info
->mdl
[i
].kva
== NULL
)
1374 BNA_SET_DMA_ADDR(dma_pa
,
1375 &(mem_info
->mdl
[i
].dma
));
1378 for (i
= 0; i
< mem_info
->num
; i
++) {
1379 mem_info
->mdl
[i
].len
= mem_info
->len
;
1380 mem_info
->mdl
[i
].kva
= kzalloc(mem_info
->len
,
1382 if (mem_info
->mdl
[i
].kva
== NULL
)
1390 bnad_mem_free(bnad
, mem_info
);
1394 /* Free IRQ for Mailbox */
1396 bnad_mbox_irq_free(struct bnad
*bnad
)
1399 unsigned long flags
;
1401 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1402 bnad_disable_mbox_irq(bnad
);
1403 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1405 irq
= BNAD_GET_MBOX_IRQ(bnad
);
1406 free_irq(irq
, bnad
);
1410 * Allocates IRQ for Mailbox, but keep it disabled
1411 * This will be enabled once we get the mbox enable callback
1415 bnad_mbox_irq_alloc(struct bnad
*bnad
)
1418 unsigned long irq_flags
, flags
;
1420 irq_handler_t irq_handler
;
1422 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1423 if (bnad
->cfg_flags
& BNAD_CF_MSIX
) {
1424 irq_handler
= (irq_handler_t
)bnad_msix_mbox_handler
;
1425 irq
= bnad
->msix_table
[BNAD_MAILBOX_MSIX_INDEX
].vector
;
1428 irq_handler
= (irq_handler_t
)bnad_isr
;
1429 irq
= bnad
->pcidev
->irq
;
1430 irq_flags
= IRQF_SHARED
;
1433 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1434 sprintf(bnad
->mbox_irq_name
, "%s", BNAD_NAME
);
1437 * Set the Mbox IRQ disable flag, so that the IRQ handler
1438 * called from request_irq() for SHARED IRQs do not execute
1440 set_bit(BNAD_RF_MBOX_IRQ_DISABLED
, &bnad
->run_flags
);
1442 BNAD_UPDATE_CTR(bnad
, mbox_intr_disabled
);
1444 err
= request_irq(irq
, irq_handler
, irq_flags
,
1445 bnad
->mbox_irq_name
, bnad
);
1451 bnad_txrx_irq_free(struct bnad
*bnad
, struct bna_intr_info
*intr_info
)
1453 kfree(intr_info
->idl
);
1454 intr_info
->idl
= NULL
;
1457 /* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1459 bnad_txrx_irq_alloc(struct bnad
*bnad
, enum bnad_intr_source src
,
1460 u32 txrx_id
, struct bna_intr_info
*intr_info
)
1462 int i
, vector_start
= 0;
1464 unsigned long flags
;
1466 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1467 cfg_flags
= bnad
->cfg_flags
;
1468 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1470 if (cfg_flags
& BNAD_CF_MSIX
) {
1471 intr_info
->intr_type
= BNA_INTR_T_MSIX
;
1472 intr_info
->idl
= kcalloc(intr_info
->num
,
1473 sizeof(struct bna_intr_descr
),
1475 if (!intr_info
->idl
)
1480 vector_start
= BNAD_MAILBOX_MSIX_VECTORS
+ txrx_id
;
1484 vector_start
= BNAD_MAILBOX_MSIX_VECTORS
+
1485 (bnad
->num_tx
* bnad
->num_txq_per_tx
) +
1493 for (i
= 0; i
< intr_info
->num
; i
++)
1494 intr_info
->idl
[i
].vector
= vector_start
+ i
;
1496 intr_info
->intr_type
= BNA_INTR_T_INTX
;
1498 intr_info
->idl
= kcalloc(intr_info
->num
,
1499 sizeof(struct bna_intr_descr
),
1501 if (!intr_info
->idl
)
1506 intr_info
->idl
[0].vector
= BNAD_INTX_TX_IB_BITMASK
;
1510 intr_info
->idl
[0].vector
= BNAD_INTX_RX_IB_BITMASK
;
1517 /* NOTE: Should be called for MSIX only
1518 * Unregisters Tx MSIX vector(s) from the kernel
1521 bnad_tx_msix_unregister(struct bnad
*bnad
, struct bnad_tx_info
*tx_info
,
1527 for (i
= 0; i
< num_txqs
; i
++) {
1528 if (tx_info
->tcb
[i
] == NULL
)
1531 vector_num
= tx_info
->tcb
[i
]->intr_vector
;
1532 free_irq(bnad
->msix_table
[vector_num
].vector
, tx_info
->tcb
[i
]);
1536 /* NOTE: Should be called for MSIX only
1537 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1540 bnad_tx_msix_register(struct bnad
*bnad
, struct bnad_tx_info
*tx_info
,
1541 u32 tx_id
, int num_txqs
)
1547 for (i
= 0; i
< num_txqs
; i
++) {
1548 vector_num
= tx_info
->tcb
[i
]->intr_vector
;
1549 sprintf(tx_info
->tcb
[i
]->name
, "%s TXQ %d", bnad
->netdev
->name
,
1550 tx_id
+ tx_info
->tcb
[i
]->id
);
1551 err
= request_irq(bnad
->msix_table
[vector_num
].vector
,
1552 (irq_handler_t
)bnad_msix_tx
, 0,
1553 tx_info
->tcb
[i
]->name
,
1563 bnad_tx_msix_unregister(bnad
, tx_info
, (i
- 1));
1567 /* NOTE: Should be called for MSIX only
1568 * Unregisters Rx MSIX vector(s) from the kernel
1571 bnad_rx_msix_unregister(struct bnad
*bnad
, struct bnad_rx_info
*rx_info
,
1577 for (i
= 0; i
< num_rxps
; i
++) {
1578 if (rx_info
->rx_ctrl
[i
].ccb
== NULL
)
1581 vector_num
= rx_info
->rx_ctrl
[i
].ccb
->intr_vector
;
1582 free_irq(bnad
->msix_table
[vector_num
].vector
,
1583 rx_info
->rx_ctrl
[i
].ccb
);
1587 /* NOTE: Should be called for MSIX only
1588 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1591 bnad_rx_msix_register(struct bnad
*bnad
, struct bnad_rx_info
*rx_info
,
1592 u32 rx_id
, int num_rxps
)
1598 for (i
= 0; i
< num_rxps
; i
++) {
1599 vector_num
= rx_info
->rx_ctrl
[i
].ccb
->intr_vector
;
1600 sprintf(rx_info
->rx_ctrl
[i
].ccb
->name
, "%s CQ %d",
1602 rx_id
+ rx_info
->rx_ctrl
[i
].ccb
->id
);
1603 err
= request_irq(bnad
->msix_table
[vector_num
].vector
,
1604 (irq_handler_t
)bnad_msix_rx
, 0,
1605 rx_info
->rx_ctrl
[i
].ccb
->name
,
1606 rx_info
->rx_ctrl
[i
].ccb
);
1615 bnad_rx_msix_unregister(bnad
, rx_info
, (i
- 1));
1619 /* Free Tx object Resources */
1621 bnad_tx_res_free(struct bnad
*bnad
, struct bna_res_info
*res_info
)
1625 for (i
= 0; i
< BNA_TX_RES_T_MAX
; i
++) {
1626 if (res_info
[i
].res_type
== BNA_RES_T_MEM
)
1627 bnad_mem_free(bnad
, &res_info
[i
].res_u
.mem_info
);
1628 else if (res_info
[i
].res_type
== BNA_RES_T_INTR
)
1629 bnad_txrx_irq_free(bnad
, &res_info
[i
].res_u
.intr_info
);
1633 /* Allocates memory and interrupt resources for Tx object */
1635 bnad_tx_res_alloc(struct bnad
*bnad
, struct bna_res_info
*res_info
,
1640 for (i
= 0; i
< BNA_TX_RES_T_MAX
; i
++) {
1641 if (res_info
[i
].res_type
== BNA_RES_T_MEM
)
1642 err
= bnad_mem_alloc(bnad
,
1643 &res_info
[i
].res_u
.mem_info
);
1644 else if (res_info
[i
].res_type
== BNA_RES_T_INTR
)
1645 err
= bnad_txrx_irq_alloc(bnad
, BNAD_INTR_TX
, tx_id
,
1646 &res_info
[i
].res_u
.intr_info
);
1653 bnad_tx_res_free(bnad
, res_info
);
1657 /* Free Rx object Resources */
1659 bnad_rx_res_free(struct bnad
*bnad
, struct bna_res_info
*res_info
)
1663 for (i
= 0; i
< BNA_RX_RES_T_MAX
; i
++) {
1664 if (res_info
[i
].res_type
== BNA_RES_T_MEM
)
1665 bnad_mem_free(bnad
, &res_info
[i
].res_u
.mem_info
);
1666 else if (res_info
[i
].res_type
== BNA_RES_T_INTR
)
1667 bnad_txrx_irq_free(bnad
, &res_info
[i
].res_u
.intr_info
);
1671 /* Allocates memory and interrupt resources for Rx object */
1673 bnad_rx_res_alloc(struct bnad
*bnad
, struct bna_res_info
*res_info
,
1678 /* All memory needs to be allocated before setup_ccbs */
1679 for (i
= 0; i
< BNA_RX_RES_T_MAX
; i
++) {
1680 if (res_info
[i
].res_type
== BNA_RES_T_MEM
)
1681 err
= bnad_mem_alloc(bnad
,
1682 &res_info
[i
].res_u
.mem_info
);
1683 else if (res_info
[i
].res_type
== BNA_RES_T_INTR
)
1684 err
= bnad_txrx_irq_alloc(bnad
, BNAD_INTR_RX
, rx_id
,
1685 &res_info
[i
].res_u
.intr_info
);
1692 bnad_rx_res_free(bnad
, res_info
);
1696 /* Timer callbacks */
1699 bnad_ioc_timeout(unsigned long data
)
1701 struct bnad
*bnad
= (struct bnad
*)data
;
1702 unsigned long flags
;
1704 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1705 bfa_nw_ioc_timeout((void *) &bnad
->bna
.ioceth
.ioc
);
1706 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1710 bnad_ioc_hb_check(unsigned long data
)
1712 struct bnad
*bnad
= (struct bnad
*)data
;
1713 unsigned long flags
;
1715 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1716 bfa_nw_ioc_hb_check((void *) &bnad
->bna
.ioceth
.ioc
);
1717 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1721 bnad_iocpf_timeout(unsigned long data
)
1723 struct bnad
*bnad
= (struct bnad
*)data
;
1724 unsigned long flags
;
1726 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1727 bfa_nw_iocpf_timeout((void *) &bnad
->bna
.ioceth
.ioc
);
1728 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1732 bnad_iocpf_sem_timeout(unsigned long data
)
1734 struct bnad
*bnad
= (struct bnad
*)data
;
1735 unsigned long flags
;
1737 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1738 bfa_nw_iocpf_sem_timeout((void *) &bnad
->bna
.ioceth
.ioc
);
1739 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1743 * All timer routines use bnad->bna_lock to protect against
1744 * the following race, which may occur in case of no locking:
1752 /* b) Dynamic Interrupt Moderation Timer */
1754 bnad_dim_timeout(unsigned long data
)
1756 struct bnad
*bnad
= (struct bnad
*)data
;
1757 struct bnad_rx_info
*rx_info
;
1758 struct bnad_rx_ctrl
*rx_ctrl
;
1760 unsigned long flags
;
1762 if (!netif_carrier_ok(bnad
->netdev
))
1765 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1766 for (i
= 0; i
< bnad
->num_rx
; i
++) {
1767 rx_info
= &bnad
->rx_info
[i
];
1770 for (j
= 0; j
< bnad
->num_rxp_per_rx
; j
++) {
1771 rx_ctrl
= &rx_info
->rx_ctrl
[j
];
1774 bna_rx_dim_update(rx_ctrl
->ccb
);
1778 /* Check for BNAD_CF_DIM_ENABLED, does not eleminate a race */
1779 if (test_bit(BNAD_RF_DIM_TIMER_RUNNING
, &bnad
->run_flags
))
1780 mod_timer(&bnad
->dim_timer
,
1781 jiffies
+ msecs_to_jiffies(BNAD_DIM_TIMER_FREQ
));
1782 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1785 /* c) Statistics Timer */
1787 bnad_stats_timeout(unsigned long data
)
1789 struct bnad
*bnad
= (struct bnad
*)data
;
1790 unsigned long flags
;
1792 if (!netif_running(bnad
->netdev
) ||
1793 !test_bit(BNAD_RF_STATS_TIMER_RUNNING
, &bnad
->run_flags
))
1796 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1797 bna_hw_stats_get(&bnad
->bna
);
1798 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1802 * Set up timer for DIM
1803 * Called with bnad->bna_lock held
1806 bnad_dim_timer_start(struct bnad
*bnad
)
1808 if (bnad
->cfg_flags
& BNAD_CF_DIM_ENABLED
&&
1809 !test_bit(BNAD_RF_DIM_TIMER_RUNNING
, &bnad
->run_flags
)) {
1810 setup_timer(&bnad
->dim_timer
, bnad_dim_timeout
,
1811 (unsigned long)bnad
);
1812 set_bit(BNAD_RF_DIM_TIMER_RUNNING
, &bnad
->run_flags
);
1813 mod_timer(&bnad
->dim_timer
,
1814 jiffies
+ msecs_to_jiffies(BNAD_DIM_TIMER_FREQ
));
1819 * Set up timer for statistics
1820 * Called with mutex_lock(&bnad->conf_mutex) held
1823 bnad_stats_timer_start(struct bnad
*bnad
)
1825 unsigned long flags
;
1827 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1828 if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING
, &bnad
->run_flags
)) {
1829 setup_timer(&bnad
->stats_timer
, bnad_stats_timeout
,
1830 (unsigned long)bnad
);
1831 mod_timer(&bnad
->stats_timer
,
1832 jiffies
+ msecs_to_jiffies(BNAD_STATS_TIMER_FREQ
));
1834 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1838 * Stops the stats timer
1839 * Called with mutex_lock(&bnad->conf_mutex) held
1842 bnad_stats_timer_stop(struct bnad
*bnad
)
1845 unsigned long flags
;
1847 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1848 if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING
, &bnad
->run_flags
))
1850 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1852 del_timer_sync(&bnad
->stats_timer
);
1858 bnad_netdev_mc_list_get(struct net_device
*netdev
, u8
*mc_list
)
1860 int i
= 1; /* Index 0 has broadcast address */
1861 struct netdev_hw_addr
*mc_addr
;
1863 netdev_for_each_mc_addr(mc_addr
, netdev
) {
1864 memcpy(&mc_list
[i
* ETH_ALEN
], &mc_addr
->addr
[0],
1871 bnad_napi_poll_rx(struct napi_struct
*napi
, int budget
)
1873 struct bnad_rx_ctrl
*rx_ctrl
=
1874 container_of(napi
, struct bnad_rx_ctrl
, napi
);
1875 struct bnad
*bnad
= rx_ctrl
->bnad
;
1878 rx_ctrl
->rx_poll_ctr
++;
1880 if (!netif_carrier_ok(bnad
->netdev
))
1883 rcvd
= bnad_cq_process(bnad
, rx_ctrl
->ccb
, budget
);
1888 napi_complete(napi
);
1890 rx_ctrl
->rx_complete
++;
1893 bnad_enable_rx_irq_unsafe(rx_ctrl
->ccb
);
1898 #define BNAD_NAPI_POLL_QUOTA 64
1900 bnad_napi_add(struct bnad
*bnad
, u32 rx_id
)
1902 struct bnad_rx_ctrl
*rx_ctrl
;
1905 /* Initialize & enable NAPI */
1906 for (i
= 0; i
< bnad
->num_rxp_per_rx
; i
++) {
1907 rx_ctrl
= &bnad
->rx_info
[rx_id
].rx_ctrl
[i
];
1908 netif_napi_add(bnad
->netdev
, &rx_ctrl
->napi
,
1909 bnad_napi_poll_rx
, BNAD_NAPI_POLL_QUOTA
);
1914 bnad_napi_delete(struct bnad
*bnad
, u32 rx_id
)
1918 /* First disable and then clean up */
1919 for (i
= 0; i
< bnad
->num_rxp_per_rx
; i
++)
1920 netif_napi_del(&bnad
->rx_info
[rx_id
].rx_ctrl
[i
].napi
);
1923 /* Should be held with conf_lock held */
1925 bnad_destroy_tx(struct bnad
*bnad
, u32 tx_id
)
1927 struct bnad_tx_info
*tx_info
= &bnad
->tx_info
[tx_id
];
1928 struct bna_res_info
*res_info
= &bnad
->tx_res_info
[tx_id
].res_info
[0];
1929 unsigned long flags
;
1934 init_completion(&bnad
->bnad_completions
.tx_comp
);
1935 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1936 bna_tx_disable(tx_info
->tx
, BNA_HARD_CLEANUP
, bnad_cb_tx_disabled
);
1937 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1938 wait_for_completion(&bnad
->bnad_completions
.tx_comp
);
1940 if (tx_info
->tcb
[0]->intr_type
== BNA_INTR_T_MSIX
)
1941 bnad_tx_msix_unregister(bnad
, tx_info
,
1942 bnad
->num_txq_per_tx
);
1944 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1945 bna_tx_destroy(tx_info
->tx
);
1946 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1951 bnad_tx_res_free(bnad
, res_info
);
1954 /* Should be held with conf_lock held */
1956 bnad_setup_tx(struct bnad
*bnad
, u32 tx_id
)
1959 struct bnad_tx_info
*tx_info
= &bnad
->tx_info
[tx_id
];
1960 struct bna_res_info
*res_info
= &bnad
->tx_res_info
[tx_id
].res_info
[0];
1961 struct bna_intr_info
*intr_info
=
1962 &res_info
[BNA_TX_RES_INTR_T_TXCMPL
].res_u
.intr_info
;
1963 struct bna_tx_config
*tx_config
= &bnad
->tx_config
[tx_id
];
1964 static const struct bna_tx_event_cbfn tx_cbfn
= {
1965 .tcb_setup_cbfn
= bnad_cb_tcb_setup
,
1966 .tcb_destroy_cbfn
= bnad_cb_tcb_destroy
,
1967 .tx_stall_cbfn
= bnad_cb_tx_stall
,
1968 .tx_resume_cbfn
= bnad_cb_tx_resume
,
1969 .tx_cleanup_cbfn
= bnad_cb_tx_cleanup
,
1973 unsigned long flags
;
1975 tx_info
->tx_id
= tx_id
;
1977 /* Initialize the Tx object configuration */
1978 tx_config
->num_txq
= bnad
->num_txq_per_tx
;
1979 tx_config
->txq_depth
= bnad
->txq_depth
;
1980 tx_config
->tx_type
= BNA_TX_T_REGULAR
;
1981 tx_config
->coalescing_timeo
= bnad
->tx_coalescing_timeo
;
1983 /* Get BNA's resource requirement for one tx object */
1984 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
1985 bna_tx_res_req(bnad
->num_txq_per_tx
,
1986 bnad
->txq_depth
, res_info
);
1987 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
1989 /* Fill Unmap Q memory requirements */
1990 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info
[BNA_TX_RES_MEM_T_UNMAPQ
],
1991 bnad
->num_txq_per_tx
, (sizeof(struct bnad_tx_unmap
) *
1994 /* Allocate resources */
1995 err
= bnad_tx_res_alloc(bnad
, res_info
, tx_id
);
1999 /* Ask BNA to create one Tx object, supplying required resources */
2000 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
2001 tx
= bna_tx_create(&bnad
->bna
, bnad
, tx_config
, &tx_cbfn
, res_info
,
2003 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
2010 INIT_DELAYED_WORK(&tx_info
->tx_cleanup_work
,
2011 (work_func_t
)bnad_tx_cleanup
);
2013 /* Register ISR for the Tx object */
2014 if (intr_info
->intr_type
== BNA_INTR_T_MSIX
) {
2015 err
= bnad_tx_msix_register(bnad
, tx_info
,
2016 tx_id
, bnad
->num_txq_per_tx
);
2021 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
2023 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
2028 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
2029 bna_tx_destroy(tx_info
->tx
);
2030 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
2034 bnad_tx_res_free(bnad
, res_info
);
2038 /* Setup the rx config for bna_rx_create */
2039 /* bnad decides the configuration */
2041 bnad_init_rx_config(struct bnad
*bnad
, struct bna_rx_config
*rx_config
)
2043 memset(rx_config
, 0, sizeof(*rx_config
));
2044 rx_config
->rx_type
= BNA_RX_T_REGULAR
;
2045 rx_config
->num_paths
= bnad
->num_rxp_per_rx
;
2046 rx_config
->coalescing_timeo
= bnad
->rx_coalescing_timeo
;
2048 if (bnad
->num_rxp_per_rx
> 1) {
2049 rx_config
->rss_status
= BNA_STATUS_T_ENABLED
;
2050 rx_config
->rss_config
.hash_type
=
2051 (BFI_ENET_RSS_IPV6
|
2052 BFI_ENET_RSS_IPV6_TCP
|
2054 BFI_ENET_RSS_IPV4_TCP
);
2055 rx_config
->rss_config
.hash_mask
=
2056 bnad
->num_rxp_per_rx
- 1;
2057 get_random_bytes(rx_config
->rss_config
.toeplitz_hash_key
,
2058 sizeof(rx_config
->rss_config
.toeplitz_hash_key
));
2060 rx_config
->rss_status
= BNA_STATUS_T_DISABLED
;
2061 memset(&rx_config
->rss_config
, 0,
2062 sizeof(rx_config
->rss_config
));
2065 rx_config
->frame_size
= BNAD_FRAME_SIZE(bnad
->netdev
->mtu
);
2066 rx_config
->q0_multi_buf
= BNA_STATUS_T_DISABLED
;
2068 /* BNA_RXP_SINGLE - one data-buffer queue
2069 * BNA_RXP_SLR - one small-buffer and one large-buffer queues
2070 * BNA_RXP_HDS - one header-buffer and one data-buffer queues
2072 /* TODO: configurable param for queue type */
2073 rx_config
->rxp_type
= BNA_RXP_SLR
;
2075 if (BNAD_PCI_DEV_IS_CAT2(bnad
) &&
2076 rx_config
->frame_size
> 4096) {
2077 /* though size_routing_enable is set in SLR,
2078 * small packets may get routed to same rxq.
2079 * set buf_size to 2048 instead of PAGE_SIZE.
2081 rx_config
->q0_buf_size
= 2048;
2082 /* this should be in multiples of 2 */
2083 rx_config
->q0_num_vecs
= 4;
2084 rx_config
->q0_depth
= bnad
->rxq_depth
* rx_config
->q0_num_vecs
;
2085 rx_config
->q0_multi_buf
= BNA_STATUS_T_ENABLED
;
2087 rx_config
->q0_buf_size
= rx_config
->frame_size
;
2088 rx_config
->q0_num_vecs
= 1;
2089 rx_config
->q0_depth
= bnad
->rxq_depth
;
2092 /* initialize for q1 for BNA_RXP_SLR/BNA_RXP_HDS */
2093 if (rx_config
->rxp_type
== BNA_RXP_SLR
) {
2094 rx_config
->q1_depth
= bnad
->rxq_depth
;
2095 rx_config
->q1_buf_size
= BFI_SMALL_RXBUF_SIZE
;
2098 rx_config
->vlan_strip_status
=
2099 (bnad
->netdev
->features
& NETIF_F_HW_VLAN_CTAG_RX
) ?
2100 BNA_STATUS_T_ENABLED
: BNA_STATUS_T_DISABLED
;
2104 bnad_rx_ctrl_init(struct bnad
*bnad
, u32 rx_id
)
2106 struct bnad_rx_info
*rx_info
= &bnad
->rx_info
[rx_id
];
2109 for (i
= 0; i
< bnad
->num_rxp_per_rx
; i
++)
2110 rx_info
->rx_ctrl
[i
].bnad
= bnad
;
2113 /* Called with mutex_lock(&bnad->conf_mutex) held */
2115 bnad_reinit_rx(struct bnad
*bnad
)
2117 struct net_device
*netdev
= bnad
->netdev
;
2118 u32 err
= 0, current_err
= 0;
2119 u32 rx_id
= 0, count
= 0;
2120 unsigned long flags
;
2122 /* destroy and create new rx objects */
2123 for (rx_id
= 0; rx_id
< bnad
->num_rx
; rx_id
++) {
2124 if (!bnad
->rx_info
[rx_id
].rx
)
2126 bnad_destroy_rx(bnad
, rx_id
);
2129 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
2130 bna_enet_mtu_set(&bnad
->bna
.enet
,
2131 BNAD_FRAME_SIZE(bnad
->netdev
->mtu
), NULL
);
2132 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
2134 for (rx_id
= 0; rx_id
< bnad
->num_rx
; rx_id
++) {
2136 current_err
= bnad_setup_rx(bnad
, rx_id
);
2137 if (current_err
&& !err
) {
2139 pr_err("RXQ:%u setup failed\n", rx_id
);
2143 /* restore rx configuration */
2144 if (bnad
->rx_info
[0].rx
&& !err
) {
2145 bnad_restore_vlans(bnad
, 0);
2146 bnad_enable_default_bcast(bnad
);
2147 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
2148 bnad_mac_addr_set_locked(bnad
, netdev
->dev_addr
);
2149 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
2150 bnad_set_rx_mode(netdev
);
2156 /* Called with bnad_conf_lock() held */
2158 bnad_destroy_rx(struct bnad
*bnad
, u32 rx_id
)
2160 struct bnad_rx_info
*rx_info
= &bnad
->rx_info
[rx_id
];
2161 struct bna_rx_config
*rx_config
= &bnad
->rx_config
[rx_id
];
2162 struct bna_res_info
*res_info
= &bnad
->rx_res_info
[rx_id
].res_info
[0];
2163 unsigned long flags
;
2170 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
2171 if (bnad
->cfg_flags
& BNAD_CF_DIM_ENABLED
&&
2172 test_bit(BNAD_RF_DIM_TIMER_RUNNING
, &bnad
->run_flags
)) {
2173 clear_bit(BNAD_RF_DIM_TIMER_RUNNING
, &bnad
->run_flags
);
2176 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
2178 del_timer_sync(&bnad
->dim_timer
);
2181 init_completion(&bnad
->bnad_completions
.rx_comp
);
2182 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
2183 bna_rx_disable(rx_info
->rx
, BNA_HARD_CLEANUP
, bnad_cb_rx_disabled
);
2184 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
2185 wait_for_completion(&bnad
->bnad_completions
.rx_comp
);
2187 if (rx_info
->rx_ctrl
[0].ccb
->intr_type
== BNA_INTR_T_MSIX
)
2188 bnad_rx_msix_unregister(bnad
, rx_info
, rx_config
->num_paths
);
2190 bnad_napi_delete(bnad
, rx_id
);
2192 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
2193 bna_rx_destroy(rx_info
->rx
);
2197 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
2199 bnad_rx_res_free(bnad
, res_info
);
2202 /* Called with mutex_lock(&bnad->conf_mutex) held */
2204 bnad_setup_rx(struct bnad
*bnad
, u32 rx_id
)
2207 struct bnad_rx_info
*rx_info
= &bnad
->rx_info
[rx_id
];
2208 struct bna_res_info
*res_info
= &bnad
->rx_res_info
[rx_id
].res_info
[0];
2209 struct bna_intr_info
*intr_info
=
2210 &res_info
[BNA_RX_RES_T_INTR
].res_u
.intr_info
;
2211 struct bna_rx_config
*rx_config
= &bnad
->rx_config
[rx_id
];
2212 static const struct bna_rx_event_cbfn rx_cbfn
= {
2213 .rcb_setup_cbfn
= NULL
,
2214 .rcb_destroy_cbfn
= NULL
,
2215 .ccb_setup_cbfn
= bnad_cb_ccb_setup
,
2216 .ccb_destroy_cbfn
= bnad_cb_ccb_destroy
,
2217 .rx_stall_cbfn
= bnad_cb_rx_stall
,
2218 .rx_cleanup_cbfn
= bnad_cb_rx_cleanup
,
2219 .rx_post_cbfn
= bnad_cb_rx_post
,
2222 unsigned long flags
;
2224 rx_info
->rx_id
= rx_id
;
2226 /* Initialize the Rx object configuration */
2227 bnad_init_rx_config(bnad
, rx_config
);
2229 /* Get BNA's resource requirement for one Rx object */
2230 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
2231 bna_rx_res_req(rx_config
, res_info
);
2232 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
2234 /* Fill Unmap Q memory requirements */
2235 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info
[BNA_RX_RES_MEM_T_UNMAPDQ
],
2236 rx_config
->num_paths
,
2237 (rx_config
->q0_depth
*
2238 sizeof(struct bnad_rx_unmap
)) +
2239 sizeof(struct bnad_rx_unmap_q
));
2241 if (rx_config
->rxp_type
!= BNA_RXP_SINGLE
) {
2242 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info
[BNA_RX_RES_MEM_T_UNMAPHQ
],
2243 rx_config
->num_paths
,
2244 (rx_config
->q1_depth
*
2245 sizeof(struct bnad_rx_unmap
) +
2246 sizeof(struct bnad_rx_unmap_q
)));
2248 /* Allocate resource */
2249 err
= bnad_rx_res_alloc(bnad
, res_info
, rx_id
);
2253 bnad_rx_ctrl_init(bnad
, rx_id
);
2255 /* Ask BNA to create one Rx object, supplying required resources */
2256 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
2257 rx
= bna_rx_create(&bnad
->bna
, bnad
, rx_config
, &rx_cbfn
, res_info
,
2261 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
2265 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
2267 INIT_WORK(&rx_info
->rx_cleanup_work
,
2268 (work_func_t
)(bnad_rx_cleanup
));
2271 * Init NAPI, so that state is set to NAPI_STATE_SCHED,
2272 * so that IRQ handler cannot schedule NAPI at this point.
2274 bnad_napi_add(bnad
, rx_id
);
2276 /* Register ISR for the Rx object */
2277 if (intr_info
->intr_type
== BNA_INTR_T_MSIX
) {
2278 err
= bnad_rx_msix_register(bnad
, rx_info
, rx_id
,
2279 rx_config
->num_paths
);
2284 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
2286 /* Set up Dynamic Interrupt Moderation Vector */
2287 if (bnad
->cfg_flags
& BNAD_CF_DIM_ENABLED
)
2288 bna_rx_dim_reconfig(&bnad
->bna
, bna_napi_dim_vector
);
2290 /* Enable VLAN filtering only on the default Rx */
2291 bna_rx_vlanfilter_enable(rx
);
2293 /* Start the DIM timer */
2294 bnad_dim_timer_start(bnad
);
2298 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
2303 bnad_destroy_rx(bnad
, rx_id
);
2307 /* Called with conf_lock & bnad->bna_lock held */
2309 bnad_tx_coalescing_timeo_set(struct bnad
*bnad
)
2311 struct bnad_tx_info
*tx_info
;
2313 tx_info
= &bnad
->tx_info
[0];
2317 bna_tx_coalescing_timeo_set(tx_info
->tx
, bnad
->tx_coalescing_timeo
);
2320 /* Called with conf_lock & bnad->bna_lock held */
2322 bnad_rx_coalescing_timeo_set(struct bnad
*bnad
)
2324 struct bnad_rx_info
*rx_info
;
2327 for (i
= 0; i
< bnad
->num_rx
; i
++) {
2328 rx_info
= &bnad
->rx_info
[i
];
2331 bna_rx_coalescing_timeo_set(rx_info
->rx
,
2332 bnad
->rx_coalescing_timeo
);
2337 * Called with bnad->bna_lock held
2340 bnad_mac_addr_set_locked(struct bnad
*bnad
, u8
*mac_addr
)
2344 if (!is_valid_ether_addr(mac_addr
))
2345 return -EADDRNOTAVAIL
;
2347 /* If datapath is down, pretend everything went through */
2348 if (!bnad
->rx_info
[0].rx
)
2351 ret
= bna_rx_ucast_set(bnad
->rx_info
[0].rx
, mac_addr
, NULL
);
2352 if (ret
!= BNA_CB_SUCCESS
)
2353 return -EADDRNOTAVAIL
;
2358 /* Should be called with conf_lock held */
2360 bnad_enable_default_bcast(struct bnad
*bnad
)
2362 struct bnad_rx_info
*rx_info
= &bnad
->rx_info
[0];
2364 unsigned long flags
;
2366 init_completion(&bnad
->bnad_completions
.mcast_comp
);
2368 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
2369 ret
= bna_rx_mcast_add(rx_info
->rx
, (u8
*)bnad_bcast_addr
,
2370 bnad_cb_rx_mcast_add
);
2371 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
2373 if (ret
== BNA_CB_SUCCESS
)
2374 wait_for_completion(&bnad
->bnad_completions
.mcast_comp
);
2378 if (bnad
->bnad_completions
.mcast_comp_status
!= BNA_CB_SUCCESS
)
2384 /* Called with mutex_lock(&bnad->conf_mutex) held */
2386 bnad_restore_vlans(struct bnad
*bnad
, u32 rx_id
)
2389 unsigned long flags
;
2391 for_each_set_bit(vid
, bnad
->active_vlans
, VLAN_N_VID
) {
2392 spin_lock_irqsave(&bnad
->bna_lock
, flags
);
2393 bna_rx_vlan_add(bnad
->rx_info
[rx_id
].rx
, vid
);
2394 spin_unlock_irqrestore(&bnad
->bna_lock
, flags
);
2398 /* Statistics utilities */
2400 bnad_netdev_qstats_fill(struct bnad
*bnad
, struct rtnl_link_stats64
*stats
)
2404 for (i
= 0; i
< bnad
->num_rx
; i
++) {
2405 for (j
= 0; j
< bnad
->num_rxp_per_rx
; j
++) {
2406 if (bnad
->rx_info
[i
].rx_ctrl
[j
].ccb
) {
2407 stats
->rx_packets
+= bnad
->rx_info
[i
].
2408 rx_ctrl
[j
].ccb
->rcb
[0]->rxq
->rx_packets
;
2409 stats
->rx_bytes
+= bnad
->rx_info
[i
].
2410 rx_ctrl
[j
].ccb
->rcb
[0]->rxq
->rx_bytes
;
2411 if (bnad
->rx_info
[i
].rx_ctrl
[j
].ccb
->rcb
[1] &&
2412 bnad
->rx_info
[i
].rx_ctrl
[j
].ccb
->
2414 stats
->rx_packets
+=
2415 bnad
->rx_info
[i
].rx_ctrl
[j
].
2416 ccb
->rcb
[1]->rxq
->rx_packets
;
2418 bnad
->rx_info
[i
].rx_ctrl
[j
].
2419 ccb
->rcb
[1]->rxq
->rx_bytes
;
2424 for (i
= 0; i
< bnad
->num_tx
; i
++) {
2425 for (j
= 0; j
< bnad
->num_txq_per_tx
; j
++) {
2426 if (bnad
->tx_info
[i
].tcb
[j
]) {
2427 stats
->tx_packets
+=
2428 bnad
->tx_info
[i
].tcb
[j
]->txq
->tx_packets
;
2430 bnad
->tx_info
[i
].tcb
[j
]->txq
->tx_bytes
;
/*
 * Must be called with the bna_lock held.
 */
void
bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
{
	struct bfi_enet_stats_mac *mac_stats;
	u32 bmap;
	int i;

	mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
	stats->rx_errors =
		mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
		mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
		mac_stats->rx_undersize;
	stats->tx_errors = mac_stats->tx_fcs_error +
					mac_stats->tx_undersize;
	stats->rx_dropped = mac_stats->rx_drop;
	stats->tx_dropped = mac_stats->tx_drop;
	stats->multicast = mac_stats->rx_multicast;
	stats->collisions = mac_stats->tx_total_collision;

	stats->rx_length_errors = mac_stats->rx_frame_length_error;

	/* receive ring buffer overflow ?? */

	stats->rx_crc_errors = mac_stats->rx_fcs_error;
	stats->rx_frame_errors = mac_stats->rx_alignment_error;
	/* recv'r fifo overrun */
	bmap = bna_rx_rid_mask(&bnad->bna);
	for (i = 0; bmap; i++) {
		if (bmap & 1) {
			stats->rx_fifo_errors +=
				bnad->stats.bna_stats->
					hw_stats.rxf_stats[i].frame_drops;
			break;
		}
		bmap >>= 1;
	}
}
static void
bnad_mbox_irq_sync(struct bnad *bnad)
{
	u32 irq;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bnad->cfg_flags & BNAD_CF_MSIX)
		irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
	else
		irq = bnad->pcidev->irq;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	synchronize_irq(irq);
}
/* Utility used by bnad_start_xmit, for doing TSO */
static int
bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err < 0) {
		BNAD_UPDATE_CTR(bnad, tso_err);
		return err;
	}

	/*
	 * For TSO, the TCP checksum field is seeded with pseudo-header sum
	 * excluding the length field.
	 */
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);

		/* Do we really need these? */
		iph->tot_len = 0;
		iph->check = 0;

		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
					   IPPROTO_TCP, 0);
		BNAD_UPDATE_CTR(bnad, tso4);
	} else {
		struct ipv6hdr *ipv6h = ipv6_hdr(skb);

		ipv6h->payload_len = 0;
		tcp_hdr(skb)->check =
			~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
					 IPPROTO_TCP, 0);
		BNAD_UPDATE_CTR(bnad, tso6);
	}

	return 0;
}
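
/*
 * Condensed sketch (not additional driver code) of how the LSO path in
 * bnad_txq_wi_prepare() below consumes this helper: the work item is marked
 * BNA_TXQ_WI_SEND_LSO with the MSS, and bnad_tso_prepare() re-seeds the TCP
 * checksum with the pseudo-header sum so the hardware can finish it per
 * segment.
 */
#if 0
	txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND_LSO);
	txqent->hdr.wi.lso_mss = htons(skb_shinfo(skb)->gso_size);
	if (bnad_tso_prepare(bnad, skb))
		return -EINVAL;		/* skb is dropped by the caller */
#endif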
/*
 * Initialize Q numbers depending on Rx Paths
 * Called with bnad->bna_lock held, because of cfg_flags
 * access.
 */
static void
bnad_q_num_init(struct bnad *bnad)
{
	int rxps;

	rxps = min((uint)num_online_cpus(),
			(uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));

	if (!(bnad->cfg_flags & BNAD_CF_MSIX))
		rxps = 1;	/* INTx */

	bnad->num_rx = 1;
	bnad->num_tx = 1;
	bnad->num_rxp_per_rx = rxps;
	bnad->num_txq_per_tx = BNAD_TXQ_NUM;
}
/*
 * Adjusts the Q numbers, given a number of msix vectors
 * Give preference to RSS as opposed to Tx priority Queues,
 * in such a case, just use 1 Tx Q
 * Called with bnad->bna_lock held b'cos of cfg_flags access
 */
static void
bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
{
	bnad->num_txq_per_tx = 1;
	if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
	     bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
	    (bnad->cfg_flags & BNAD_CF_MSIX)) {
		bnad->num_rxp_per_rx = msix_vectors -
			(bnad->num_tx * bnad->num_txq_per_tx) -
			BNAD_MAILBOX_MSIX_VECTORS;
	} else
		bnad->num_rxp_per_rx = 1;
}
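
/*
 * Worked example (not part of the driver), assuming BNAD_MAILBOX_MSIX_VECTORS
 * is 1: with the single Tx queue forced by this helper, a grant of 8 MSI-X
 * vectors clears the 1 (Tx) + bnad_rxqs_per_cq + 1 (mailbox) threshold and
 * leaves 8 - 1 - 1 = 6 Rx paths; anything below the threshold drops to a
 * single Rx path.
 */
#if 0
	bnad->num_tx = 1;
	bnad_q_num_adjust(bnad, 8, 8);	/* -> bnad->num_rxp_per_rx == 6 */
#endif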
/* Enable / disable ioceth */
static int
bnad_ioceth_disable(struct bnad *bnad)
{
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	init_completion(&bnad->bnad_completions.ioc_comp);
	bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
		msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));

	err = bnad->bnad_completions.ioc_comp_status;
	return err;
}
static int
bnad_ioceth_enable(struct bnad *bnad)
{
	int err = 0;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	init_completion(&bnad->bnad_completions.ioc_comp);
	bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
	bna_ioceth_enable(&bnad->bna.ioceth);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
		msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));

	err = bnad->bnad_completions.ioc_comp_status;

	return err;
}
/* Free BNA resources */
static void
bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
		u32 res_val_max)
{
	int i;

	for (i = 0; i < res_val_max; i++)
		bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
}
/* Allocates memory and interrupt resources for BNA */
static int
bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
		u32 res_val_max)
{
	int i, err;

	for (i = 0; i < res_val_max; i++) {
		err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_res_free(bnad, res_info, res_val_max);
	return err;
}
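
/*
 * Usage sketch (not additional driver code): the res_info[] array is first
 * filled by bna_res_req() and then handed to bnad_res_alloc(), exactly as
 * bnad_pci_probe() does further down; on failure the helper has already
 * rolled back its own allocations, so the caller only propagates the error.
 */
#if 0
	bna_res_req(&bnad->res_info[0]);
	err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
	if (err)
		goto drv_uninit;	/* nothing left to free here */
#endif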
/* Interrupt enable / disable */
static void
bnad_enable_msix(struct bnad *bnad)
{
	int i, ret;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (bnad->msix_table)
		return;

	bnad->msix_table =
		kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);

	if (!bnad->msix_table)
		goto intx_mode;

	for (i = 0; i < bnad->msix_num; i++)
		bnad->msix_table[i].entry = i;

	ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table,
				    1, bnad->msix_num);
	if (ret < 0) {
		goto intx_mode;
	} else if (ret < bnad->msix_num) {
		pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
			ret, bnad->msix_num);

		spin_lock_irqsave(&bnad->bna_lock, flags);
		/* ret = #of vectors that we got */
		bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
			(ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);

		bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
			 BNAD_MAILBOX_MSIX_VECTORS;

		if (bnad->msix_num > ret) {
			pci_disable_msix(bnad->pcidev);
			goto intx_mode;
		}
	}

	pci_intx(bnad->pcidev, 0);

	return;

intx_mode:
	pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n");

	kfree(bnad->msix_table);
	bnad->msix_table = NULL;
	bnad->msix_num = 0;
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad->cfg_flags &= ~BNAD_CF_MSIX;
	bnad_q_num_init(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
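
/*
 * Vector-budget sketch (not additional driver code): the count that
 * bnad_enable_msix() tries to obtain is computed the same way in
 * bnad_init() below, one vector per TxQ, one per Rx path, plus the
 * mailbox vector(s); on any failure the function falls back to INTx.
 */
#if 0
	bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
			(bnad->num_rx * bnad->num_rxp_per_rx) +
			BNAD_MAILBOX_MSIX_VECTORS;
	bnad_enable_msix(bnad);		/* clears BNAD_CF_MSIX if it fails */
#endif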
static void
bnad_disable_msix(struct bnad *bnad)
{
	u32 cfg_flags;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	cfg_flags = bnad->cfg_flags;
	if (bnad->cfg_flags & BNAD_CF_MSIX)
		bnad->cfg_flags &= ~BNAD_CF_MSIX;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (cfg_flags & BNAD_CF_MSIX) {
		pci_disable_msix(bnad->pcidev);
		kfree(bnad->msix_table);
		bnad->msix_table = NULL;
	}
}
/* Netdev entry points */
static int
bnad_open(struct net_device *netdev)
{
	int err;
	struct bnad *bnad = netdev_priv(netdev);
	struct bna_pause_config pause_config;
	unsigned long flags;

	mutex_lock(&bnad->conf_mutex);

	/* Tx */
	err = bnad_setup_tx(bnad, 0);
	if (err)
		goto err_return;

	/* Rx */
	err = bnad_setup_rx(bnad, 0);
	if (err)
		goto cleanup_tx;

	/* Port */
	pause_config.tx_pause = 0;
	pause_config.rx_pause = 0;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_enet_mtu_set(&bnad->bna.enet,
			 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
	bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
	bna_enet_enable(&bnad->bna.enet);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Enable broadcast */
	bnad_enable_default_bcast(bnad);

	/* Restore VLANs, if any */
	bnad_restore_vlans(bnad, 0);

	/* Set the UCAST address */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Start the stats timer */
	bnad_stats_timer_start(bnad);

	mutex_unlock(&bnad->conf_mutex);

	return 0;

cleanup_tx:
	bnad_destroy_tx(bnad, 0);

err_return:
	mutex_unlock(&bnad->conf_mutex);
	return err;
}
static int
bnad_stop(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	mutex_lock(&bnad->conf_mutex);

	/* Stop the stats timer */
	bnad_stats_timer_stop(bnad);

	init_completion(&bnad->bnad_completions.enet_comp);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
			bnad_cb_enet_disabled);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	wait_for_completion(&bnad->bnad_completions.enet_comp);

	bnad_destroy_tx(bnad, 0);
	bnad_destroy_rx(bnad, 0);

	/* Synchronize mailbox IRQ */
	bnad_mbox_irq_sync(bnad);

	mutex_unlock(&bnad->conf_mutex);

	return 0;
}
/* TX */
/* Returns 0 for success */
static int
bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
		    struct sk_buff *skb, struct bna_txq_entry *txqent)
{
	u16 flags = 0;
	u32 gso_size;
	u16 vlan_tag = 0;

	if (vlan_tx_tag_present(skb)) {
		vlan_tag = (u16)vlan_tx_tag_get(skb);
		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
	}
	if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
		vlan_tag = ((tcb->priority & 0x7) << VLAN_PRIO_SHIFT)
				| (vlan_tag & 0x1fff);
		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
	}
	txqent->hdr.wi.vlan_tag = htons(vlan_tag);

	if (skb_is_gso(skb)) {
		gso_size = skb_shinfo(skb)->gso_size;
		if (unlikely(gso_size > bnad->netdev->mtu)) {
			BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
			return -EINVAL;
		}
		if (unlikely((gso_size + skb_transport_offset(skb) +
			      tcp_hdrlen(skb)) >= skb->len)) {
			txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
			txqent->hdr.wi.lso_mss = 0;
			BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
		} else {
			txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND_LSO);
			txqent->hdr.wi.lso_mss = htons(gso_size);
		}

		if (bnad_tso_prepare(bnad, skb)) {
			BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
			return -EINVAL;
		}

		flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
		txqent->hdr.wi.l4_hdr_size_n_offset =
			htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(
			tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));
	} else {
		txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
		txqent->hdr.wi.lso_mss = 0;

		if (unlikely(skb->len > (bnad->netdev->mtu + ETH_HLEN))) {
			BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
			return -EINVAL;
		}

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			u8 proto = 0;

			if (skb->protocol == htons(ETH_P_IP))
				proto = ip_hdr(skb)->protocol;
#ifdef NETIF_F_IPV6_CSUM
			else if (skb->protocol == htons(ETH_P_IPV6)) {
				/* nexthdr may not be TCP immediately. */
				proto = ipv6_hdr(skb)->nexthdr;
			}
#endif
			if (proto == IPPROTO_TCP) {
				flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
				txqent->hdr.wi.l4_hdr_size_n_offset =
					htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
					      (0, skb_transport_offset(skb)));

				BNAD_UPDATE_CTR(bnad, tcpcsum_offload);

				if (unlikely(skb_headlen(skb) <
					    skb_transport_offset(skb) +
					    tcp_hdrlen(skb))) {
					BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
					return -EINVAL;
				}
			} else if (proto == IPPROTO_UDP) {
				flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
				txqent->hdr.wi.l4_hdr_size_n_offset =
					htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
					      (0, skb_transport_offset(skb)));

				BNAD_UPDATE_CTR(bnad, udpcsum_offload);
				if (unlikely(skb_headlen(skb) <
					    skb_transport_offset(skb) +
					    sizeof(struct udphdr))) {
					BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
					return -EINVAL;
				}
			} else {
				BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
				return -EINVAL;
			}
		} else
			txqent->hdr.wi.l4_hdr_size_n_offset = 0;
	}

	txqent->hdr.wi.flags = htons(flags);
	txqent->hdr.wi.frame_length = htonl(skb->len);

	return 0;
}
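
/*
 * Caller-side contract, as exercised by bnad_start_xmit() below (sketch,
 * not additional driver code): a non-zero return means the work item was
 * not programmed, so the skb is simply dropped without touching the ring.
 */
#if 0
	if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
#endif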
/*
 * bnad_start_xmit : Netdev entry point for Transmit
 *		     Called under lock held by net_device
 */
static netdev_tx_t
bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	u32 txq_id = 0;
	struct bna_tcb *tcb = NULL;
	struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap;
	u32 prod, q_depth, vect_id;
	u32 wis, vectors, len;
	int i;
	dma_addr_t dma_addr;
	struct bna_txq_entry *txqent;

	len = skb_headlen(skb);

	/* Sanity checks for the skb */

	if (unlikely(skb->len <= ETH_HLEN)) {
		dev_kfree_skb_any(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
		return NETDEV_TX_OK;
	}
	if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
		dev_kfree_skb_any(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
		return NETDEV_TX_OK;
	}
	if (unlikely(len == 0)) {
		dev_kfree_skb_any(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
		return NETDEV_TX_OK;
	}

	tcb = bnad->tx_info[0].tcb[txq_id];

	/*
	 * Takes care of the Tx that is scheduled between clearing the flag
	 * and the netif_tx_stop_all_queues() call.
	 */
	if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
		dev_kfree_skb_any(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
		return NETDEV_TX_OK;
	}

	q_depth = tcb->q_depth;
	prod = tcb->producer_index;
	unmap_q = tcb->unmap_q;

	vectors = 1 + skb_shinfo(skb)->nr_frags;
	wis = BNA_TXQ_WI_NEEDED(vectors);	/* 4 vectors per work item */

	if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
		dev_kfree_skb_any(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
		return NETDEV_TX_OK;
	}

	/* Check for available TxQ resources */
	if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
		if ((*tcb->hw_consumer_index != tcb->consumer_index) &&
		    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
			u32 sent;
			sent = bnad_txcmpl_process(bnad, tcb);
			if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
				bna_ib_ack(tcb->i_dbell, sent);
			smp_mb__before_atomic();
			clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
		} else {
			netif_stop_queue(netdev);
			BNAD_UPDATE_CTR(bnad, netif_queue_stop);
		}

		smp_mb();
		/*
		 * Check again to deal with race condition between
		 * netif_stop_queue here, and netif_wake_queue in
		 * interrupt handler which is not inside netif tx lock.
		 */
		if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
			BNAD_UPDATE_CTR(bnad, netif_queue_stop);
			return NETDEV_TX_BUSY;
		} else {
			netif_wake_queue(netdev);
			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
		}
	}

	txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
	head_unmap = &unmap_q[prod];

	/* Program the opcode, flags, frame_len, num_vectors in WI */
	if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	txqent->hdr.wi.reserved = 0;
	txqent->hdr.wi.num_vectors = vectors;

	head_unmap->skb = skb;
	head_unmap->nvecs = 0;

	/* Program the vectors */
	unmap = head_unmap;
	dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
				  len, DMA_TO_DEVICE);
	BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
	txqent->vector[0].length = htons(len);
	dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr);
	head_unmap->nvecs++;

	for (i = 0, vect_id = 0; i < vectors - 1; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		u32 size = skb_frag_size(frag);

		if (unlikely(size == 0)) {
			/* Undo the changes starting at tcb->producer_index */
			bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
				tcb->producer_index);
			dev_kfree_skb_any(skb);
			BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
			return NETDEV_TX_OK;
		}

		len += size;

		vect_id++;
		if (vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
			vect_id = 0;
			BNA_QE_INDX_INC(prod, q_depth);
			txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
			txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
			unmap = &unmap_q[prod];
		}

		dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
					    0, size, DMA_TO_DEVICE);
		dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size);
		BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
		txqent->vector[vect_id].length = htons(size);
		dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr,
				   dma_addr);
		head_unmap->nvecs++;
	}

	if (unlikely(len != skb->len)) {
		/* Undo the changes starting at tcb->producer_index */
		bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
		dev_kfree_skb_any(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
		return NETDEV_TX_OK;
	}

	BNA_QE_INDX_INC(prod, q_depth);
	tcb->producer_index = prod;

	smp_mb();

	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
		return NETDEV_TX_OK;

	skb_tx_timestamp(skb);

	bna_txq_prod_indx_doorbell(tcb);
	smp_mb();

	return NETDEV_TX_OK;
}
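
/*
 * Ring-occupancy sketch (an assumption about the usual power-of-two ring
 * arithmetic, not the BNA_QE_FREE_CNT() definition from the bna headers):
 * with q_depth a power of two, the Tx path above needs
 * (consumer - producer - 1) masked to the ring to be at least "wis".
 */
#if 0
static inline u32 bnad_example_ring_free(u32 prod, u32 cons, u32 q_depth)
{
	return (cons - prod - 1) & (q_depth - 1);	/* one slot kept empty */
}
#endif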
/*
 * Used spin_lock to synchronize reading of stats structures, which
 * is written by BNA under the same lock.
 */
static struct rtnl_link_stats64 *
bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	bnad_netdev_qstats_fill(bnad, stats);
	bnad_netdev_hwstats_fill(bnad, stats);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return stats;
}
static void
bnad_set_rx_ucast_fltr(struct bnad *bnad)
{
	struct net_device *netdev = bnad->netdev;
	int uc_count = netdev_uc_count(netdev);
	enum bna_cb_status ret;
	u8 *mac_list;
	struct netdev_hw_addr *ha;
	int entry;

	if (netdev_uc_empty(bnad->netdev)) {
		bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
		return;
	}

	if (uc_count > bna_attr(&bnad->bna)->num_ucmac)
		goto mode_default;

	mac_list = kzalloc(uc_count * ETH_ALEN, GFP_ATOMIC);
	if (mac_list == NULL)
		goto mode_default;

	entry = 0;
	netdev_for_each_uc_addr(ha, netdev) {
		memcpy(&mac_list[entry * ETH_ALEN],
		       &ha->addr[0], ETH_ALEN);
		entry++;
	}

	ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry,
			mac_list, NULL);
	kfree(mac_list);

	if (ret != BNA_CB_SUCCESS)
		goto mode_default;

	return;

	/* ucast packets not in UCAM are routed to default function */
mode_default:
	bnad->cfg_flags |= BNAD_CF_DEFAULT;
	bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
}
static void
bnad_set_rx_mcast_fltr(struct bnad *bnad)
{
	struct net_device *netdev = bnad->netdev;
	int mc_count = netdev_mc_count(netdev);
	enum bna_cb_status ret;
	u8 *mac_list;

	if (netdev->flags & IFF_ALLMULTI)
		goto mode_allmulti;

	if (netdev_mc_empty(netdev))
		return;

	if (mc_count > bna_attr(&bnad->bna)->num_mcmac)
		goto mode_allmulti;

	mac_list = kzalloc((mc_count + 1) * ETH_ALEN, GFP_ATOMIC);

	if (mac_list == NULL)
		goto mode_allmulti;

	memcpy(&mac_list[0], &bnad_bcast_addr[0], ETH_ALEN);

	/* copy rest of the MCAST addresses */
	bnad_netdev_mc_list_get(netdev, mac_list);
	ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
			mac_list, NULL);
	kfree(mac_list);

	if (ret != BNA_CB_SUCCESS)
		goto mode_allmulti;

	return;

mode_allmulti:
	bnad->cfg_flags |= BNAD_CF_ALLMULTI;
	bna_rx_mcast_delall(bnad->rx_info[0].rx, NULL);
}
static void
bnad_set_rx_mode(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	enum bna_rxmode new_mode, mode_mask;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	if (bnad->rx_info[0].rx == NULL) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return;
	}

	/* clear bnad flags to update it with new settings */
	bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT |
			BNAD_CF_ALLMULTI);

	new_mode = 0;
	if (netdev->flags & IFF_PROMISC) {
		new_mode |= BNAD_RXMODE_PROMISC_DEFAULT;
		bnad->cfg_flags |= BNAD_CF_PROMISC;
	} else {
		bnad_set_rx_mcast_fltr(bnad);

		if (bnad->cfg_flags & BNAD_CF_ALLMULTI)
			new_mode |= BNA_RXMODE_ALLMULTI;

		bnad_set_rx_ucast_fltr(bnad);

		if (bnad->cfg_flags & BNAD_CF_DEFAULT)
			new_mode |= BNA_RXMODE_DEFAULT;
	}

	mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
			BNA_RXMODE_ALLMULTI;
	bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask, NULL);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
/*
 * bna_lock is used to sync writes to netdev->addr
 * conf_lock cannot be used since this call may be made
 * in a non-blocking context.
 */
static int
bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
{
	int err;
	struct bnad *bnad = netdev_priv(netdev);
	struct sockaddr *sa = (struct sockaddr *)mac_addr;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
	if (!err)
		memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return err;
}
static int
bnad_mtu_set(struct bnad *bnad, int frame_size)
{
	unsigned long flags;

	init_completion(&bnad->bnad_completions.mtu_comp);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	wait_for_completion(&bnad->bnad_completions.mtu_comp);

	return bnad->bnad_completions.mtu_comp_status;
}
static int
bnad_change_mtu(struct net_device *netdev, int new_mtu)
{
	int err, mtu;
	struct bnad *bnad = netdev_priv(netdev);
	u32 rx_count = 0, frame, new_frame;

	if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
		return -EINVAL;

	mutex_lock(&bnad->conf_mutex);

	mtu = netdev->mtu;
	netdev->mtu = new_mtu;

	frame = BNAD_FRAME_SIZE(mtu);
	new_frame = BNAD_FRAME_SIZE(new_mtu);

	/* check if multi-buffer needs to be enabled */
	if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
	    netif_running(bnad->netdev)) {
		/* only when transition is over 4K */
		if ((frame <= 4096 && new_frame > 4096) ||
		    (frame > 4096 && new_frame <= 4096))
			rx_count = bnad_reinit_rx(bnad);
	}

	/* rx_count > 0 - new rx created
	 *	- Linux set err = 0 and return
	 */
	err = bnad_mtu_set(bnad, new_frame);
	if (err)
		err = -EBUSY;

	mutex_unlock(&bnad->conf_mutex);
	return err;
}
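
/*
 * Worked example (not part of the driver), assuming BNAD_FRAME_SIZE() only
 * adds the Ethernet overhead on top of the MTU: moving from a 1500-byte to
 * a 9000-byte MTU crosses the 4096-byte frame boundary, so a running CAT2
 * device gets its Rx path rebuilt by bnad_reinit_rx(); an 8000 -> 9000
 * change stays on the same side of the boundary and does not.
 */
#if 0
	frame = BNAD_FRAME_SIZE(1500);		/* well under 4096 */
	new_frame = BNAD_FRAME_SIZE(9000);	/* above 4096 */
	if ((frame <= 4096 && new_frame > 4096) ||
	    (frame > 4096 && new_frame <= 4096))
		rx_count = bnad_reinit_rx(bnad);	/* multi-buffer toggles */
#endif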
static int
bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	if (!bnad->rx_info[0].rx)
		return 0;

	mutex_lock(&bnad->conf_mutex);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
	set_bit(vid, bnad->active_vlans);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);

	return 0;
}
static int
bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	if (!bnad->rx_info[0].rx)
		return 0;

	mutex_lock(&bnad->conf_mutex);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	clear_bit(vid, bnad->active_vlans);
	bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);

	return 0;
}
static int bnad_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnad *bnad = netdev_priv(dev);
	netdev_features_t changed = features ^ dev->features;

	if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(dev)) {
		unsigned long flags;

		spin_lock_irqsave(&bnad->bna_lock, flags);

		if (features & NETIF_F_HW_VLAN_CTAG_RX)
			bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
		else
			bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);

		spin_unlock_irqrestore(&bnad->bna_lock, flags);
	}

	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void
bnad_netpoll(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	u32 curr_mask;
	int i, j;

	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
		bna_intx_disable(&bnad->bna, curr_mask);
		bnad_isr(bnad->pcidev->irq, netdev);
		bna_intx_enable(&bnad->bna, curr_mask);
	} else {
		/*
		 * Tx processing may happen in sending context, so no need
		 * to explicitly process completions here
		 */

		/* Rx processing */
		for (i = 0; i < bnad->num_rx; i++) {
			rx_info = &bnad->rx_info[i];
			if (!rx_info->rx)
				continue;
			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
				rx_ctrl = &rx_info->rx_ctrl[j];
				if (rx_ctrl->ccb)
					bnad_netif_rx_schedule_poll(bnad,
							    rx_ctrl->ccb);
			}
		}
	}
}
#endif
static const struct net_device_ops bnad_netdev_ops = {
	.ndo_open		= bnad_open,
	.ndo_stop		= bnad_stop,
	.ndo_start_xmit		= bnad_start_xmit,
	.ndo_get_stats64	= bnad_get_stats64,
	.ndo_set_rx_mode	= bnad_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnad_set_mac_address,
	.ndo_change_mtu		= bnad_change_mtu,
	.ndo_vlan_rx_add_vid	= bnad_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= bnad_vlan_rx_kill_vid,
	.ndo_set_features	= bnad_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bnad_netpoll
#endif
};
static void
bnad_netdev_init(struct bnad *bnad, bool using_dac)
{
	struct net_device *netdev = bnad->netdev;

	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX |
		NETIF_F_HW_VLAN_CTAG_RX;

	netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6;

	netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;

	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->mem_start = bnad->mmio_start;
	netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;

	netdev->netdev_ops = &bnad_netdev_ops;
	bnad_set_ethtool_ops(netdev);
}
/*
 * 1. Initialize the bnad structure
 * 2. Setup netdev pointer in pci_dev
 * 3. Initialize no. of TxQ & CQs & MSIX vectors
 * 4. Initialize work queue.
 */
static int
bnad_init(struct bnad *bnad,
	  struct pci_dev *pdev, struct net_device *netdev)
{
	unsigned long flags;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	pci_set_drvdata(pdev, netdev);

	bnad->netdev = netdev;
	bnad->pcidev = pdev;
	bnad->mmio_start = pci_resource_start(pdev, 0);
	bnad->mmio_len = pci_resource_len(pdev, 0);
	bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
	if (!bnad->bar0) {
		dev_err(&pdev->dev, "ioremap for bar0 failed\n");
		return -ENOMEM;
	}
	pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
	       (unsigned long long) bnad->mmio_len);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!bnad_msix_disable)
		bnad->cfg_flags = BNAD_CF_MSIX;

	bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;

	bnad_q_num_init(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
		(bnad->num_rx * bnad->num_rxp_per_rx) +
			 BNAD_MAILBOX_MSIX_VECTORS;

	bnad->txq_depth = BNAD_TXQ_DEPTH;
	bnad->rxq_depth = BNAD_RXQ_DEPTH;

	bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
	bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;

	sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
	bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
	if (!bnad->work_q) {
		iounmap(bnad->bar0);
		return -ENOMEM;
	}

	return 0;
}
/*
 * Must be called after bnad_pci_uninit()
 * so that iounmap() and pci_set_drvdata(NULL)
 * happens only after PCI uninitialization.
 */
static void
bnad_uninit(struct bnad *bnad)
{
	if (bnad->work_q) {
		flush_workqueue(bnad->work_q);
		destroy_workqueue(bnad->work_q);
		bnad->work_q = NULL;
	}

	if (bnad->bar0)
		iounmap(bnad->bar0);
}
/*
 * Initialize locks
 *	a) Per ioceth mutex used for serializing configuration
 *	   changes from OS interface
 *	b) spin lock used to protect bna state machine
 */
static void
bnad_lock_init(struct bnad *bnad)
{
	spin_lock_init(&bnad->bna_lock);
	mutex_init(&bnad->conf_mutex);
	mutex_init(&bnad_list_mutex);
}

static void
bnad_lock_uninit(struct bnad *bnad)
{
	mutex_destroy(&bnad->conf_mutex);
	mutex_destroy(&bnad_list_mutex);
}
/* PCI Initialization */
static int
bnad_pci_init(struct bnad *bnad,
	      struct pci_dev *pdev, bool *using_dac)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;
	err = pci_request_regions(pdev, BNAD_NAME);
	if (err)
		goto disable_device;
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		*using_dac = true;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			goto release_regions;
		*using_dac = false;
	}
	pci_set_master(pdev);
	return 0;

release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);

	return err;
}
static void
bnad_pci_uninit(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
static int
bnad_pci_probe(struct pci_dev *pdev,
		const struct pci_device_id *pcidev_id)
{
	bool	using_dac;
	int	err;
	struct bnad *bnad;
	struct bna *bna;
	struct net_device *netdev;
	struct bfa_pcidev pcidev_info;
	unsigned long flags;

	pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
	       pdev, pcidev_id, PCI_FUNC(pdev->devfn));

	mutex_lock(&bnad_fwimg_mutex);
	if (!cna_get_firmware_buf(pdev)) {
		mutex_unlock(&bnad_fwimg_mutex);
		pr_warn("Failed to load Firmware Image!\n");
		return -ENODEV;
	}
	mutex_unlock(&bnad_fwimg_mutex);

	/*
	 * Allocates sizeof(struct net_device + struct bnad)
	 * bnad = netdev->priv
	 */
	netdev = alloc_etherdev(sizeof(struct bnad));
	if (!netdev) {
		err = -ENOMEM;
		return err;
	}
	bnad = netdev_priv(netdev);
	bnad_lock_init(bnad);
	bnad_add_to_list(bnad);

	mutex_lock(&bnad->conf_mutex);
	/*
	 * PCI initialization
	 *	Output : using_dac = 1 for 64 bit DMA
	 *			   = 0 for 32 bit DMA
	 */
	using_dac = false;
	err = bnad_pci_init(bnad, pdev, &using_dac);
	if (err)
		goto unlock_mutex;

	/*
	 * Initialize bnad structure
	 * Setup relation between pci_dev & netdev
	 */
	err = bnad_init(bnad, pdev, netdev);
	if (err)
		goto pci_uninit;

	/* Initialize netdev structure, set up ethtool ops */
	bnad_netdev_init(bnad, using_dac);

	/* Set link to down state */
	netif_carrier_off(netdev);

	/* Setup the debugfs node for this bfad */
	if (bna_debugfs_enable)
		bnad_debugfs_init(bnad);

	/* Get resource requirement form bna */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_res_req(&bnad->res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Allocate resources from bna */
	err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
	if (err)
		goto drv_uninit;

	bna = &bnad->bna;

	/* Setup pcidev_info for bna_init() */
	pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
	pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
	pcidev_info.device_id = bnad->pcidev->device;
	pcidev_info.pci_bar_kva = bnad->bar0;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad->stats.bna_stats = &bna->stats;

	bnad_enable_msix(bnad);
	err = bnad_mbox_irq_alloc(bnad);
	if (err)
		goto res_free;

	/* Set up timers */
	setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
				((unsigned long)bnad));
	setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
				((unsigned long)bnad));
	setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
				((unsigned long)bnad));
	setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
				((unsigned long)bnad));

	/* Now start the timer before calling IOC */
	mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
		  jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));

	/*
	 * Start the chip
	 * If the call back comes with error, we bail out.
	 * This is a catastrophic error.
	 */
	err = bnad_ioceth_enable(bnad);
	if (err) {
		pr_err("BNA: Initialization failed err=%d\n",
		       err);
		goto probe_success;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
		bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
		bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
			bna_attr(bna)->num_rxp - 1);
		if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
			bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
			err = -EIO;
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (err)
		goto disable_ioceth;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
	if (err) {
		err = -EIO;
		goto disable_ioceth;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Get the burnt-in mac */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr);
	bnad_set_netdev_perm_addr(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);

	/* Finally, register with net_device layer */
	err = register_netdev(netdev);
	if (err) {
		pr_err("BNA : Registering with netdev failed\n");
		goto probe_uninit;
	}
	set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);

	return 0;

probe_success:
	mutex_unlock(&bnad->conf_mutex);
	return 0;

probe_uninit:
	mutex_lock(&bnad->conf_mutex);
	bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
disable_ioceth:
	bnad_ioceth_disable(bnad);
	del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_uninit(bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	bnad_mbox_irq_free(bnad);
	bnad_disable_msix(bnad);
res_free:
	bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
drv_uninit:
	/* Remove the debugfs node for this bnad */
	kfree(bnad->regdata);
	bnad_debugfs_uninit(bnad);
	bnad_uninit(bnad);
pci_uninit:
	bnad_pci_uninit(pdev);
unlock_mutex:
	mutex_unlock(&bnad->conf_mutex);
	bnad_remove_from_list(bnad);
	bnad_lock_uninit(bnad);
	free_netdev(netdev);
	return err;
}
static void
bnad_pci_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnad *bnad;
	struct bna *bna;
	unsigned long flags;

	if (!netdev)
		return;

	pr_info("%s bnad_pci_remove\n", netdev->name);
	bnad = netdev_priv(netdev);
	bna = &bnad->bna;

	if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
		unregister_netdev(netdev);

	mutex_lock(&bnad->conf_mutex);
	bnad_ioceth_disable(bnad);
	del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_uninit(bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
	bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
	bnad_mbox_irq_free(bnad);
	bnad_disable_msix(bnad);
	bnad_pci_uninit(pdev);
	mutex_unlock(&bnad->conf_mutex);
	bnad_remove_from_list(bnad);
	bnad_lock_uninit(bnad);
	/* Remove the debugfs node for this bnad */
	kfree(bnad->regdata);
	bnad_debugfs_uninit(bnad);
	bnad_uninit(bnad);
	free_netdev(netdev);
}
static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
			PCI_DEVICE_ID_BROCADE_CT),
		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
		.class_mask =  0xffff00
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
			BFA_PCI_DEVICE_ID_CT2),
		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
		.class_mask =  0xffff00
	},
	{0,  },
};

MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
static struct pci_driver bnad_pci_driver = {
	.name = BNAD_NAME,
	.id_table = bnad_pci_id_table,
	.probe = bnad_pci_probe,
	.remove = bnad_pci_remove,
};
static int __init
bnad_module_init(void)
{
	int err;

	pr_info("Brocade 10G Ethernet driver - version: %s\n",
			BNAD_VERSION);

	bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);

	err = pci_register_driver(&bnad_pci_driver);
	if (err < 0) {
		pr_err("bna : PCI registration failed in module init "
			"(%d)\n", err);
		return err;
	}

	return 0;
}

static void __exit
bnad_module_exit(void)
{
	pci_unregister_driver(&bnad_pci_driver);
	release_firmware(bfi_fw);
}

module_init(bnad_module_init);
module_exit(bnad_module_exit);
MODULE_AUTHOR("Brocade");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
MODULE_VERSION(BNAD_VERSION);
MODULE_FIRMWARE(CNA_FW_FILE_CT);
MODULE_FIRMWARE(CNA_FW_FILE_CT2);