/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */
#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/prefetch.h>

#include "bnad.h"
#include "bna.h"
#include "cna.h"

static DEFINE_MUTEX(bnad_fwimg_mutex);

/*
 * Module params
 */
static uint bnad_msix_disable;
module_param(bnad_msix_disable, uint, 0444);
MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");

static uint bnad_ioc_auto_recover = 1;
module_param(bnad_ioc_auto_recover, uint, 0444);
MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");

/*
 * Global variables
 */
u32 bnad_rxqs_per_cq = 2;

static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

/*
 * Local MACROS
 */
#define BNAD_TX_UNMAPQ_DEPTH	(bnad->txq_depth * 2)

#define BNAD_RX_UNMAPQ_DEPTH	(bnad->rxq_depth)

#define BNAD_GET_MBOX_IRQ(_bnad)				\
	(((_bnad)->cfg_flags & BNAD_CF_MSIX) ?			\
	 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
	 ((_bnad)->pcidev->irq))

#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth)	\
do {								\
	(_res_info)->res_type = BNA_RES_T_MEM;			\
	(_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;	\
	(_res_info)->res_u.mem_info.num = (_num);		\
	(_res_info)->res_u.mem_info.len =			\
	sizeof(struct bnad_unmap_q) +				\
	(sizeof(struct bnad_skb_unmap) * ((_depth) - 1));	\
} while (0)

#define BNAD_TXRX_SYNC_MDELAY	250	/* 250 msecs */

/*
 * Reinitialize completions in CQ, once Rx is taken down
 */
static void
bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bna_cq_entry *cmpl, *next_cmpl;
	unsigned int wi_range, wis = 0, ccb_prod = 0;
	int i;

	BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
			    wi_range);

	for (i = 0; i < ccb->q_depth; i++) {
		wis++;
		if (likely(--wi_range))
			next_cmpl = cmpl + 1;
		else {
			BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
			wis = 0;
			BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
						next_cmpl, wi_range);
		}
		cmpl->valid = 0;
		cmpl = next_cmpl;
	}
}

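/*
 * Unmap a transmitted skb: unmap the linear head and every page fragment
 * that was DMA-mapped for it, clear the saved DMA addresses, and return
 * the unmap-queue index advanced past all of the entries consumed.
 */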
static u32
bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array,
		  u32 index, u32 depth, struct sk_buff *skb, u32 frag)
{
	int j;
	array[index].skb = NULL;

	dma_unmap_single(pdev, dma_unmap_addr(&array[index], dma_addr),
			skb_headlen(skb), DMA_TO_DEVICE);
	dma_unmap_addr_set(&array[index], dma_addr, 0);
	BNA_QE_INDX_ADD(index, 1, depth);

	for (j = 0; j < frag; j++) {
		dma_unmap_page(pdev, dma_unmap_addr(&array[index], dma_addr),
			  skb_shinfo(skb)->frags[j].size, DMA_TO_DEVICE);
		dma_unmap_addr_set(&array[index], dma_addr, 0);
		BNA_QE_INDX_ADD(index, 1, depth);
	}

	return index;
}

/*
 * Frees all pending Tx Bufs
 * At this point no activity is expected on the Q,
 * so DMA unmap & freeing is fine.
 */
static void
bnad_free_all_txbufs(struct bnad *bnad,
		 struct bna_tcb *tcb)
{
	u32		unmap_cons;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff	*skb = NULL;
	int		i;

	unmap_array = unmap_q->unmap_array;

	unmap_cons = 0;
	while (unmap_cons < unmap_q->q_depth) {
		skb = unmap_array[unmap_cons].skb;
		if (!skb) {
			unmap_cons++;
			continue;
		}
		unmap_array[unmap_cons].skb = NULL;

		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr), skb_headlen(skb),
				 DMA_TO_DEVICE);

		dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
		if (++unmap_cons >= unmap_q->q_depth)
			break;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			dma_unmap_page(&bnad->pcidev->dev,
				       dma_unmap_addr(&unmap_array[unmap_cons],
						      dma_addr),
				       skb_shinfo(skb)->frags[i].size,
				       DMA_TO_DEVICE);
			dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
					   0);
			if (++unmap_cons >= unmap_q->q_depth)
				break;
		}
		dev_kfree_skb_any(skb);
	}
}

/* Data Path Handlers */

/*
 * bnad_free_txbufs : Frees the Tx bufs on Tx completion
 * Can be called in a) Interrupt context
 *		    b) Sending context
 *		    c) Tasklet context
 */
static u32
bnad_free_txbufs(struct bnad *bnad,
		 struct bna_tcb *tcb)
{
	u32		unmap_cons, sent_packets = 0, sent_bytes = 0;
	u16		wis, updated_hw_cons;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff	*skb;

	/*
	 * Just return if TX is stopped. This check is useful
	 * when bnad_free_txbufs() runs out of a tasklet scheduled
	 * before bnad_cb_tx_cleanup() cleared BNAD_TXQ_TX_STARTED bit
	 * but this routine runs actually after the cleanup has been
	 * executed.
	 */
	if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
		return 0;

	updated_hw_cons = *(tcb->hw_consumer_index);

	wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
				  updated_hw_cons, tcb->q_depth);

	BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));

	unmap_array = unmap_q->unmap_array;
	unmap_cons = unmap_q->consumer_index;

	prefetch(&unmap_array[unmap_cons + 1]);
	while (wis) {
		skb = unmap_array[unmap_cons].skb;

		sent_packets++;
		sent_bytes += skb->len;
		wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);

		unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
				unmap_cons, unmap_q->q_depth, skb,
				skb_shinfo(skb)->nr_frags);

		dev_kfree_skb_any(skb);
	}

	/* Update consumer pointers. */
	tcb->consumer_index = updated_hw_cons;
	unmap_q->consumer_index = unmap_cons;

	tcb->txq->tx_packets += sent_packets;
	tcb->txq->tx_bytes += sent_bytes;

	return sent_packets;
}

/* Tx Free Tasklet function */
/* Frees for all the tcb's in all the Tx's */
/*
 * Scheduled from sending context, so that
 * the fat Tx lock is not held for too long
 * in the sending context.
 */
static void
bnad_tx_free_tasklet(unsigned long bnad_ptr)
{
	struct bnad *bnad = (struct bnad *)bnad_ptr;
	struct bna_tcb *tcb;
	u32		acked = 0;
	int		i, j;

	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			tcb = bnad->tx_info[i].tcb[j];
			if (!tcb)
				continue;
			if (((u16) (*tcb->hw_consumer_index) !=
				tcb->consumer_index) &&
				(!test_and_set_bit(BNAD_TXQ_FREE_SENT,
						  &tcb->flags))) {
				acked = bnad_free_txbufs(bnad, tcb);
				if (likely(test_bit(BNAD_TXQ_TX_STARTED,
					&tcb->flags)))
					bna_ib_ack(tcb->i_dbell, acked);
				smp_mb__before_clear_bit();
				clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
			}
			if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED,
						&tcb->flags)))
				continue;
			if (netif_queue_stopped(bnad->netdev)) {
				if (acked && netif_carrier_ok(bnad->netdev) &&
					BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
						BNAD_NETIF_WAKE_THRESHOLD) {
					netif_wake_queue(bnad->netdev);
					/* TODO */
					/* Counters for individual TxQs? */
					BNAD_UPDATE_CTR(bnad,
						netif_queue_wakeup);
				}
			}
		}
	}
}

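/*
 * Tx completion handler shared by the MSIX Tx vector and the INTx path:
 * reclaim completed Tx buffers, wake the netdev queue when enough entries
 * free up while the queue is started and carrier is on, then ack the IB.
 */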
static u32
bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct net_device *netdev = bnad->netdev;
	u32 sent = 0;

	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		return 0;

	sent = bnad_free_txbufs(bnad, tcb);
	if (sent) {
		if (netif_queue_stopped(netdev) &&
		    netif_carrier_ok(netdev) &&
		    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
				    BNAD_NETIF_WAKE_THRESHOLD) {
			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
				netif_wake_queue(netdev);
				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
			}
		}
	}

	if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
		bna_ib_ack(tcb->i_dbell, sent);

	smp_mb__before_clear_bit();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

	return sent;
}

/* MSIX Tx Completion Handler */
static irqreturn_t
bnad_msix_tx(int irq, void *data)
{
	struct bna_tcb *tcb = (struct bna_tcb *)data;
	struct bnad *bnad = tcb->bnad;

	bnad_tx(bnad, tcb);

	return IRQ_HANDLED;
}

static void
bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	rcb->producer_index = 0;
	rcb->consumer_index = 0;

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
}

static void
bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb;
	int unmap_cons;

	unmap_q = rcb->unmap_q;
	unmap_array = unmap_q->unmap_array;
	for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
		skb = unmap_array[unmap_cons].skb;
		if (!skb)
			continue;
		unmap_array[unmap_cons].skb = NULL;
		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr),
				 rcb->rxq->buffer_size,
				 DMA_FROM_DEVICE);
		dev_kfree_skb(skb);
	}
	bnad_reset_rcb(bnad, rcb);
}

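/*
 * Allocate skbs for as many free unmap-queue slots as are available,
 * DMA-map each one, write its address into the RxQ entry, and ring the
 * producer-index doorbell once at least one buffer has been posted.
 */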
static void
bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
{
	u16 to_alloc, alloced, unmap_prod, wi_range;
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct bna_rxq_entry *rxent;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	alloced = 0;
	to_alloc =
		BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);

	unmap_array = unmap_q->unmap_array;
	unmap_prod = unmap_q->producer_index;

	BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);

	while (to_alloc--) {
		if (!wi_range)
			BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
					     wi_range);
		skb = netdev_alloc_skb_ip_align(bnad->netdev,
						rcb->rxq->buffer_size);
		if (unlikely(!skb)) {
			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
			rcb->rxq->rxbuf_alloc_failed++;
			goto finishing;
		}
		unmap_array[unmap_prod].skb = skb;
		dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
					  rcb->rxq->buffer_size,
					  DMA_FROM_DEVICE);
		dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
				   dma_addr);
		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);

		rxent++;
		wi_range--;
		alloced++;
	}

finishing:
	if (likely(alloced)) {
		unmap_q->producer_index = unmap_prod;
		rcb->producer_index = unmap_prod;
		smp_mb();
		if (likely(test_bit(BNAD_RXQ_STARTED, &rcb->flags)))
			bna_rxq_prod_indx_doorbell(rcb);
	}
}

static inline void
bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
		if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
			 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
			bnad_alloc_n_post_rxbufs(bnad, rcb);
		smp_mb__before_clear_bit();
		clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
	}
}

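/*
 * Rx completion processing (NAPI poll worker): walk up to 'budget' valid
 * CQ entries, unmap and hand each skb to the stack (with checksum offload,
 * VLAN tag and GRO handling), ack the IB with its interrupt left disabled,
 * refill the RxQ(s), and return the number of packets consumed.
 */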
static u32
bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
{
	struct bna_cq_entry *cmpl, *next_cmpl;
	struct bna_rcb *rcb = NULL;
	unsigned int wi_range, packets = 0, wis = 0;
	struct bnad_unmap_q *unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb;
	u32 flags, unmap_cons;
	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);

	set_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);

	if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)) {
		clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
		return 0;
	}

	prefetch(bnad->netdev);
	BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
			    wi_range);
	BUG_ON(!(wi_range <= ccb->q_depth));
	while (cmpl->valid && packets < budget) {
		packets++;
		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));

		if (bna_is_small_rxq(cmpl->rxq_id))
			rcb = ccb->rcb[1];
		else
			rcb = ccb->rcb[0];

		unmap_q = rcb->unmap_q;
		unmap_array = unmap_q->unmap_array;
		unmap_cons = unmap_q->consumer_index;

		skb = unmap_array[unmap_cons].skb;
		BUG_ON(!(skb));
		unmap_array[unmap_cons].skb = NULL;
		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr),
				 rcb->rxq->buffer_size,
				 DMA_FROM_DEVICE);
		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);

		/* Should be more efficient ? Performance ? */
		BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);

		wis++;
		if (likely(--wi_range))
			next_cmpl = cmpl + 1;
		else {
			BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
			wis = 0;
			BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
						next_cmpl, wi_range);
			BUG_ON(!(wi_range <= ccb->q_depth));
		}
		prefetch(next_cmpl);

		flags = ntohl(cmpl->flags);
		if (unlikely
		    (flags &
		     (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
		      BNA_CQ_EF_TOO_LONG))) {
			dev_kfree_skb_any(skb);
			rcb->rxq->rx_packets_with_error++;
			goto next;
		}

		skb_put(skb, ntohs(cmpl->length));
		if (likely
		    ((bnad->netdev->features & NETIF_F_RXCSUM) &&
		     (((flags & BNA_CQ_EF_IPV4) &&
		      (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
		      (flags & BNA_CQ_EF_IPV6)) &&
		      (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
		      (flags & BNA_CQ_EF_L4_CKSUM_OK)))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		rcb->rxq->rx_packets++;
		rcb->rxq->rx_bytes += skb->len;
		skb->protocol = eth_type_trans(skb, bnad->netdev);

		if (flags & BNA_CQ_EF_VLAN)
			__vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag));

		if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			napi_gro_receive(&rx_ctrl->napi, skb);
		else {
			netif_receive_skb(skb);
		}

next:
		cmpl->valid = 0;
		cmpl = next_cmpl;
	}

	BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);

	if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
		bna_ib_ack_disable_irq(ccb->i_dbell, packets);

	bnad_refill_rxq(bnad, ccb->rcb[0]);
	if (ccb->rcb[1])
		bnad_refill_rxq(bnad, ccb->rcb[1]);

	clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);

	return packets;
}

static void
bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
	struct napi_struct *napi = &rx_ctrl->napi;

	if (likely(napi_schedule_prep(napi))) {
		__napi_schedule(napi);
		rx_ctrl->rx_schedule++;
	}
}

/* MSIX Rx Path Handler */
static irqreturn_t
bnad_msix_rx(int irq, void *data)
{
	struct bna_ccb *ccb = (struct bna_ccb *)data;

	if (ccb) {
		((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++;
		bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
	}

	return IRQ_HANDLED;
}

/* Interrupt handlers */

/* Mbox Interrupt Handlers */
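/*
 * Both the MSIX mailbox handler and the shared INTx ISR below take
 * bna_lock before testing BNAD_RF_MBOX_IRQ_DISABLED, so an interrupt that
 * races with mailbox-IRQ teardown bails out without touching the mailbox.
 */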
static irqreturn_t
bnad_msix_mbox_handler(int irq, void *data)
{
	u32 intr_status;
	unsigned long flags;
	struct bnad *bnad = (struct bnad *)data;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_HANDLED;
	}

	bna_intr_status_get(&bnad->bna, intr_status);

	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t
bnad_isr(int irq, void *data)
{
	int i, j;
	u32 intr_status;
	unsigned long flags;
	struct bnad *bnad = (struct bnad *)data;
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	struct bna_tcb *tcb = NULL;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_NONE;
	}

	bna_intr_status_get(&bnad->bna, intr_status);

	if (unlikely(!intr_status)) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_NONE;
	}

	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (!BNA_IS_INTX_DATA_INTR(intr_status))
		return IRQ_HANDLED;

	/* Process data interrupts */
	/* Tx processing */
	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			tcb = bnad->tx_info[i].tcb[j];
			if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
				bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
		}
	}
	/* Rx processing */
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			if (rx_ctrl->ccb)
				bnad_netif_rx_schedule_poll(bnad,
							    rx_ctrl->ccb);
		}
	}
	return IRQ_HANDLED;
}

/*
 * Called in interrupt / callback context
 * with bna_lock held, so cfg_flags access is OK
 */
static void
bnad_enable_mbox_irq(struct bnad *bnad)
{
	clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
}

/*
 * Called with bnad->bna_lock held because of
 * bnad->cfg_flags access.
 */
static void
bnad_disable_mbox_irq(struct bnad *bnad)
{
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
}

static void
bnad_set_netdev_perm_addr(struct bnad *bnad)
{
	struct net_device *netdev = bnad->netdev;

	memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
	if (is_zero_ether_addr(netdev->dev_addr))
		memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
}

/* Control Path Handlers */

/* Callbacks */
void
bnad_cb_mbox_intr_enable(struct bnad *bnad)
{
	bnad_enable_mbox_irq(bnad);
}

void
bnad_cb_mbox_intr_disable(struct bnad *bnad)
{
	bnad_disable_mbox_irq(bnad);
}

void
bnad_cb_ioceth_ready(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.ioc_comp);
}

void
bnad_cb_ioceth_failed(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
	complete(&bnad->bnad_completions.ioc_comp);
}

void
bnad_cb_ioceth_disabled(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.ioc_comp);
}

static void
bnad_cb_enet_disabled(void *arg)
{
	struct bnad *bnad = (struct bnad *)arg;

	netif_carrier_off(bnad->netdev);
	complete(&bnad->bnad_completions.enet_comp);
}

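/*
 * Link-status callback from bna: toggles the netdev carrier, tracks CEE
 * state, and wakes or stops each subqueue depending on whether its TxQ is
 * started.
 */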
void
bnad_cb_ethport_link_status(struct bnad *bnad,
			enum bna_link_status link_status)
{
	bool link_up = 0;

	link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);

	if (link_status == BNA_CEE_UP) {
		if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
			BNAD_UPDATE_CTR(bnad, cee_toggle);
		set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
	} else {
		if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
			BNAD_UPDATE_CTR(bnad, cee_toggle);
		clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
	}

	if (link_up) {
		if (!netif_carrier_ok(bnad->netdev)) {
			uint tx_id, tcb_id;
			printk(KERN_WARNING "bna: %s link up\n",
				bnad->netdev->name);
			netif_carrier_on(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
			for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
				for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
				      tcb_id++) {
					struct bna_tcb *tcb =
					bnad->tx_info[tx_id].tcb[tcb_id];
					u32 txq_id;
					if (!tcb)
						continue;

					txq_id = tcb->id;

					if (test_bit(BNAD_TXQ_TX_STARTED,
						     &tcb->flags)) {
						/*
						 * Force an immediate
						 * Transmit Schedule */
						printk(KERN_INFO "bna: %s %d "
						      "TXQ_STARTED\n",
						       bnad->netdev->name,
						       txq_id);
						netif_wake_subqueue(
								bnad->netdev,
								txq_id);
						BNAD_UPDATE_CTR(bnad,
							netif_queue_wakeup);
					} else {
						netif_stop_subqueue(
								bnad->netdev,
								txq_id);
						BNAD_UPDATE_CTR(bnad,
							netif_queue_stop);
					}
				}
			}
		}
	} else {
		if (netif_carrier_ok(bnad->netdev)) {
			printk(KERN_WARNING "bna: %s link down\n",
				bnad->netdev->name);
			netif_carrier_off(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
		}
	}
}

static void
bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.tx_comp);
}

static void
bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;

	tx_info->tcb[tcb->id] = tcb;
	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
	unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
}

static void
bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;

	while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		cpu_relax();

	bnad_free_all_txbufs(bnad, tcb);

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;

	smp_mb__before_clear_bit();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

	tx_info->tcb[tcb->id] = NULL;
}

static void
bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
	unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
}

static void
bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
{
	bnad_free_all_rxbufs(bnad, rcb);
}

static void
bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_info *rx_info =
			(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = ccb;
	ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
}

static void
bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_info *rx_info =
			(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = NULL;
}

static void
bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tx->priv;
	struct bna_tcb *tcb;
	u32 txq_id;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
		txq_id = tcb->id;
		clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
		netif_stop_subqueue(bnad->netdev, txq_id);
		printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n",
			bnad->netdev->name, txq_id);
	}
}

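/*
 * Tx resume callback: drain and reset each stopped TxQ's unmap queue, mark
 * the queue started, and wake its subqueue if the carrier is up. Also
 * re-reads the permanent MAC address if the first ioceth enable left it
 * zeroed.
 */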
static void
bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
	struct bna_tcb *tcb;
	struct bnad_unmap_q *unmap_q;
	u32 txq_id;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
		txq_id = tcb->id;

		unmap_q = tcb->unmap_q;

		if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
			continue;

		while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
			cpu_relax();

		bnad_free_all_txbufs(bnad, tcb);

		unmap_q->producer_index = 0;
		unmap_q->consumer_index = 0;

		smp_mb__before_clear_bit();
		clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

		set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);

		if (netif_carrier_ok(bnad->netdev)) {
			printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
				bnad->netdev->name, txq_id);
			netif_wake_subqueue(bnad->netdev, txq_id);
			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
		}
	}

	/*
	 * Workaround for first ioceth enable failure & we
	 * get a 0 MAC address. We try to get the MAC address
	 * again here.
	 */
	if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
		bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr);
		bnad_set_netdev_perm_addr(bnad);
	}
}

static void
bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
	struct bna_tcb *tcb;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
	}

	mdelay(BNAD_TXRX_SYNC_MDELAY);
	bna_tx_cleanup_complete(tx);
}

static void
bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
{
	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
	struct bna_ccb *ccb;
	struct bnad_rx_ctrl *rx_ctrl;
	int i;

	mdelay(BNAD_TXRX_SYNC_MDELAY);

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];
		ccb = rx_ctrl->ccb;
		if (!ccb)
			continue;

		clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);

		if (ccb->rcb[1])
			clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);

		while (test_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags))
			cpu_relax();
	}

	bna_rx_cleanup_complete(rx);
}

static void
bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
{
	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
	struct bna_ccb *ccb;
	struct bna_rcb *rcb;
	struct bnad_rx_ctrl *rx_ctrl;
	struct bnad_unmap_q *unmap_q;
	int i;
	int j;

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];
		ccb = rx_ctrl->ccb;
		if (!ccb)
			continue;

		bnad_cq_cmpl_init(bnad, ccb);

		for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
			rcb = ccb->rcb[j];
			if (!rcb)
				continue;
			bnad_free_all_rxbufs(bnad, rcb);

			set_bit(BNAD_RXQ_STARTED, &rcb->flags);
			unmap_q = rcb->unmap_q;

			/* Now allocate & post buffers for this RCB */
			/* !!Allocation in callback context */
			if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
				if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
					>> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
					bnad_alloc_n_post_rxbufs(bnad, rcb);
				smp_mb__before_clear_bit();
				clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
			}
		}
	}
}

static void
bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.rx_comp);
}

static void
bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
{
	bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.mcast_comp);
}

void
bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
		       struct bna_stats *stats)
{
	if (status == BNA_CB_SUCCESS)
		BNAD_UPDATE_CTR(bnad, hw_stats_updates);

	if (!netif_running(bnad->netdev) ||
		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		return;

	mod_timer(&bnad->stats_timer,
		  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
}

static void
bnad_cb_enet_mtu_set(struct bnad *bnad)
{
	bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.mtu_comp);
}

/* Resource allocation, free functions */

static void
bnad_mem_free(struct bnad *bnad,
	      struct bna_mem_info *mem_info)
{
	int i;
	dma_addr_t dma_pa;

	if (mem_info->mdl == NULL)
		return;

	for (i = 0; i < mem_info->num; i++) {
		if (mem_info->mdl[i].kva != NULL) {
			if (mem_info->mem_type == BNA_MEM_T_DMA) {
				BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
						dma_pa);
				dma_free_coherent(&bnad->pcidev->dev,
						  mem_info->mdl[i].len,
						  mem_info->mdl[i].kva, dma_pa);
			} else
				kfree(mem_info->mdl[i].kva);
		}
	}
	kfree(mem_info->mdl);
	mem_info->mdl = NULL;
}

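/*
 * Allocate the descriptor list for a resource request: DMA-coherent
 * buffers for BNA_MEM_T_DMA requests, kzalloc'ed kernel memory otherwise.
 * Everything allocated so far is freed again on any failure.
 */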
static int
bnad_mem_alloc(struct bnad *bnad,
	       struct bna_mem_info *mem_info)
{
	int i;
	dma_addr_t dma_pa;

	if ((mem_info->num == 0) || (mem_info->len == 0)) {
		mem_info->mdl = NULL;
		return 0;
	}

	mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
				GFP_KERNEL);
	if (mem_info->mdl == NULL)
		return -ENOMEM;

	if (mem_info->mem_type == BNA_MEM_T_DMA) {
		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva =
				dma_alloc_coherent(&bnad->pcidev->dev,
						mem_info->len, &dma_pa,
						GFP_KERNEL);

			if (mem_info->mdl[i].kva == NULL)
				goto err_return;

			BNA_SET_DMA_ADDR(dma_pa,
					 &(mem_info->mdl[i].dma));
		}
	} else {
		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva = kzalloc(mem_info->len,
							GFP_KERNEL);
			if (mem_info->mdl[i].kva == NULL)
				goto err_return;
		}
	}

	return 0;

err_return:
	bnad_mem_free(bnad, mem_info);
	return -ENOMEM;
}

/* Free IRQ for Mailbox */
static void
bnad_mbox_irq_free(struct bnad *bnad)
{
	int irq;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad_disable_mbox_irq(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	irq = BNAD_GET_MBOX_IRQ(bnad);
	free_irq(irq, bnad);
}

/*
 * Allocates IRQ for Mailbox, but keep it disabled
 * This will be enabled once we get the mbox enable callback
 * from bna
 */
static int
bnad_mbox_irq_alloc(struct bnad *bnad)
{
	int		err = 0;
	unsigned long	irq_flags, flags;
	u32		irq;
	irq_handler_t	irq_handler;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bnad->cfg_flags & BNAD_CF_MSIX) {
		irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
		irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
		irq_flags = 0;
	} else {
		irq_handler = (irq_handler_t)bnad_isr;
		irq = bnad->pcidev->irq;
		irq_flags = IRQF_SHARED;
	}

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);

	/*
	 * Set the Mbox IRQ disable flag, so that the IRQ handler
	 * called from request_irq() for SHARED IRQs do not execute
	 */
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);

	err = request_irq(irq, irq_handler, irq_flags,
			  bnad->mbox_irq_name, bnad);

	return err;
}

static void
bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
{
	kfree(intr_info->idl);
	intr_info->idl = NULL;
}

/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
static int
bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
		    u32 txrx_id, struct bna_intr_info *intr_info)
{
	int i, vector_start = 0;
	u32 cfg_flags;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	cfg_flags = bnad->cfg_flags;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (cfg_flags & BNAD_CF_MSIX) {
		intr_info->intr_type = BNA_INTR_T_MSIX;
		intr_info->idl = kcalloc(intr_info->num,
					sizeof(struct bna_intr_descr),
					GFP_KERNEL);
		if (!intr_info->idl)
			return -ENOMEM;

		switch (src) {
		case BNAD_INTR_TX:
			vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
			break;

		case BNAD_INTR_RX:
			vector_start = BNAD_MAILBOX_MSIX_VECTORS +
					(bnad->num_tx * bnad->num_txq_per_tx) +
					txrx_id;
			break;

		default:
			BUG();
		}

		for (i = 0; i < intr_info->num; i++)
			intr_info->idl[i].vector = vector_start + i;
	} else {
		intr_info->intr_type = BNA_INTR_T_INTX;
		intr_info->num = 1;
		intr_info->idl = kcalloc(intr_info->num,
					sizeof(struct bna_intr_descr),
					GFP_KERNEL);
		if (!intr_info->idl)
			return -ENOMEM;

		switch (src) {
		case BNAD_INTR_TX:
			intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
			break;

		case BNAD_INTR_RX:
			intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
			break;
		}
	}
	return 0;
}

/**
 * NOTE: Should be called for MSIX only
 * Unregisters Tx MSIX vector(s) from the kernel
 */
static void
bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
			int num_txqs)
{
	int i;
	int vector_num;

	for (i = 0; i < num_txqs; i++) {
		if (tx_info->tcb[i] == NULL)
			continue;

		vector_num = tx_info->tcb[i]->intr_vector;
		free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
	}
}

/**
 * NOTE: Should be called for MSIX only
 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
			u32 tx_id, int num_txqs)
{
	int i;
	int err;
	int vector_num;

	for (i = 0; i < num_txqs; i++) {
		vector_num = tx_info->tcb[i]->intr_vector;
		sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
				tx_id + tx_info->tcb[i]->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_tx, 0,
				  tx_info->tcb[i]->name,
				  tx_info->tcb[i]);
		if (err)
			goto err_return;
	}

	return 0;

err_return:
	if (i > 0)
		bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
	return -1;
}

/**
 * NOTE: Should be called for MSIX only
 * Unregisters Rx MSIX vector(s) from the kernel
 */
static void
bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
			int num_rxps)
{
	int i;
	int vector_num;

	for (i = 0; i < num_rxps; i++) {
		if (rx_info->rx_ctrl[i].ccb == NULL)
			continue;

		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
		free_irq(bnad->msix_table[vector_num].vector,
			 rx_info->rx_ctrl[i].ccb);
	}
}

/**
 * NOTE: Should be called for MSIX only
 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
			u32 rx_id, int num_rxps)
{
	int i;
	int err;
	int vector_num;

	for (i = 0; i < num_rxps; i++) {
		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
		sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
			bnad->netdev->name,
			rx_id + rx_info->rx_ctrl[i].ccb->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_rx, 0,
				  rx_info->rx_ctrl[i].ccb->name,
				  rx_info->rx_ctrl[i].ccb);
		if (err)
			goto err_return;
	}

	return 0;

err_return:
	if (i > 0)
		bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
	return -1;
}

/* Free Tx object Resources */
static void
bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
{
	int i;

	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
	}
}

/* Allocates memory and interrupt resources for Tx object */
static int
bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
		  u32 tx_id)
{
	int i, err = 0;

	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			err = bnad_mem_alloc(bnad,
					&res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
					&res_info[i].res_u.intr_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_tx_res_free(bnad, res_info);
	return err;
}

/* Free Rx object Resources */
static void
bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
{
	int i;

	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
	}
}

/* Allocates memory and interrupt resources for Rx object */
static int
bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
		  uint rx_id)
{
	int i, err = 0;

	/* All memory needs to be allocated before setup_ccbs */
	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			err = bnad_mem_alloc(bnad,
					&res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
					&res_info[i].res_u.intr_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_rx_res_free(bnad, res_info);
	return err;
}

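/*
 * The timer handlers that follow all take bna_lock before calling into
 * bfa_nw_ioc / bna; the race table further down explains why the lock is
 * needed around the test_bit and mod_timer sequences.
 */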
/* Timer callbacks */
/* a) IOC timer */
static void
bnad_ioc_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_ioc_hb_check(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_iocpf_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_iocpf_sem_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * All timer routines use bnad->bna_lock to protect against
 * the following race, which may occur in case of no locking:
 *	Time	CPU m		CPU n
 *	0	1 = test_bit
 *	1			clear_bit
 *	2			del_timer_sync
 *	3	mod_timer
 */

/* b) Dynamic Interrupt Moderation Timer */
static void
bnad_dim_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	int i, j;
	unsigned long flags;

	if (!netif_carrier_ok(bnad->netdev))
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			if (!rx_ctrl->ccb)
				continue;
			bna_rx_dim_update(rx_ctrl->ccb);
		}
	}

	/* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
	if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/* c) Statistics Timer */
static void
bnad_stats_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	if (!netif_running(bnad->netdev) ||
		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_hw_stats_get(&bnad->bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * Set up timer for DIM
 * Called with bnad->bna_lock held
 */
void
bnad_dim_timer_start(struct bnad *bnad)
{
	if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
	    !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
		setup_timer(&bnad->dim_timer, bnad_dim_timeout,
			    (unsigned long)bnad);
		set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
	}
}

/*
 * Set up timer for statistics
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
static void
bnad_stats_timer_start(struct bnad *bnad)
{
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
		setup_timer(&bnad->stats_timer, bnad_stats_timeout,
			    (unsigned long)bnad);
		mod_timer(&bnad->stats_timer,
			  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * Stops the stats timer
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
static void
bnad_stats_timer_stop(struct bnad *bnad)
{
	int to_del = 0;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		to_del = 1;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (to_del)
		del_timer_sync(&bnad->stats_timer);
}

/* Utilities */

static void
bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
{
	int i = 1; /* Index 0 has broadcast address */
	struct netdev_hw_addr *mc_addr;

	netdev_for_each_mc_addr(mc_addr, netdev) {
		memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
							ETH_ALEN);
		i++;
	}
}

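/*
 * NAPI poll handler: processes up to 'budget' completions via bnad_poll_cq;
 * when fewer packets than the budget are consumed it completes NAPI and
 * re-enables the Rx interrupt for this CCB.
 */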
static int
bnad_napi_poll_rx(struct napi_struct *napi, int budget)
{
	struct bnad_rx_ctrl *rx_ctrl =
		container_of(napi, struct bnad_rx_ctrl, napi);
	struct bnad *bnad = rx_ctrl->bnad;
	int rcvd = 0;

	rx_ctrl->rx_poll_ctr++;

	if (!netif_carrier_ok(bnad->netdev))
		goto poll_exit;

	rcvd = bnad_poll_cq(bnad, rx_ctrl->ccb, budget);
	if (rcvd >= budget)
		return rcvd;

poll_exit:
	napi_complete(napi);

	rx_ctrl->rx_complete++;

	if (rx_ctrl->ccb)
		bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);

	return rcvd;
}

#define BNAD_NAPI_POLL_QUOTA		64
static void
bnad_napi_init(struct bnad *bnad, u32 rx_id)
{
	struct bnad_rx_ctrl *rx_ctrl;
	int i;

	/* Initialize & enable NAPI */
	for (i = 0; i < bnad->num_rxp_per_rx; i++) {
		rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
		netif_napi_add(bnad->netdev, &rx_ctrl->napi,
			       bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
	}
}

static void
bnad_napi_enable(struct bnad *bnad, u32 rx_id)
{
	struct bnad_rx_ctrl *rx_ctrl;
	int i;

	/* Initialize & enable NAPI */
	for (i = 0; i < bnad->num_rxp_per_rx; i++) {
		rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];

		napi_enable(&rx_ctrl->napi);
	}
}

static void
bnad_napi_disable(struct bnad *bnad, u32 rx_id)
{
	int i;

	/* First disable and then clean up */
	for (i = 0; i < bnad->num_rxp_per_rx; i++) {
		napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
		netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
	}
}

/* Should be called with conf_lock held */
void
bnad_cleanup_tx(struct bnad *bnad, u32 tx_id)
{
	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
	unsigned long flags;

	if (!tx_info->tx)
		return;

	init_completion(&bnad->bnad_completions.tx_comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	wait_for_completion(&bnad->bnad_completions.tx_comp);

	if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
		bnad_tx_msix_unregister(bnad, tx_info,
			bnad->num_txq_per_tx);

	if (0 == tx_id)
		tasklet_kill(&bnad->tx_free_tasklet);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_destroy(tx_info->tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	tx_info->tx = NULL;
	tx_info->tx_id = 0;

	bnad_tx_res_free(bnad, res_info);
}

/* Should be called with conf_lock held */
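/*
 * bnad_setup_tx: builds the Tx object for tx_id - queries BNA for its
 * resource requirements, allocates memory and interrupt resources, creates
 * the Tx object, registers per-TXQ MSIX handlers when applicable, and
 * finally enables the Tx object.
 */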
int
bnad_setup_tx(struct bnad *bnad, u32 tx_id)
{
	int err;
	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
	struct bna_intr_info *intr_info =
			&res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
	struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
	struct bna_tx_event_cbfn tx_cbfn;
	struct bna_tx *tx;
	unsigned long flags;

	tx_info->tx_id = tx_id;

	/* Initialize the Tx object configuration */
	tx_config->num_txq = bnad->num_txq_per_tx;
	tx_config->txq_depth = bnad->txq_depth;
	tx_config->tx_type = BNA_TX_T_REGULAR;
	tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;

	/* Initialize the tx event handlers */
	tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
	tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
	tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
	tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
	tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;

	/* Get BNA's resource requirement for one tx object */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_res_req(bnad->num_txq_per_tx,
		bnad->txq_depth, res_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Fill Unmap Q memory requirements */
	BNAD_FILL_UNMAPQ_MEM_REQ(
			&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
			bnad->num_txq_per_tx,
			BNAD_TX_UNMAPQ_DEPTH);

	/* Allocate resources */
	err = bnad_tx_res_alloc(bnad, res_info, tx_id);
	if (err)
		return err;

	/* Ask BNA to create one Tx object, supplying required resources */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
			tx_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (!tx)
		goto err_return;
	tx_info->tx = tx;

	/* Register ISR for the Tx object */
	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
		err = bnad_tx_msix_register(bnad, tx_info,
			tx_id, bnad->num_txq_per_tx);
		if (err)
			goto err_return;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_enable(tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return 0;

err_return:
	bnad_tx_res_free(bnad, res_info);
	return err;
}

/* Setup the rx config for bna_rx_create */
/* bnad decides the configuration */
static void
bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
{
	rx_config->rx_type = BNA_RX_T_REGULAR;
	rx_config->num_paths = bnad->num_rxp_per_rx;
	rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;

	if (bnad->num_rxp_per_rx > 1) {
		rx_config->rss_status = BNA_STATUS_T_ENABLED;
		rx_config->rss_config.hash_type =
				(BFI_ENET_RSS_IPV6 |
				 BFI_ENET_RSS_IPV6_TCP |
				 BFI_ENET_RSS_IPV4 |
				 BFI_ENET_RSS_IPV4_TCP);
		rx_config->rss_config.hash_mask =
				bnad->num_rxp_per_rx - 1;
		get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
			sizeof(rx_config->rss_config.toeplitz_hash_key));
	} else {
		rx_config->rss_status = BNA_STATUS_T_DISABLED;
		memset(&rx_config->rss_config, 0,
		       sizeof(rx_config->rss_config));
	}
	rx_config->rxp_type = BNA_RXP_SLR;
	rx_config->q_depth = bnad->rxq_depth;

	rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;

	rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
}

static void
bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
{
	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
	int i;

	for (i = 0; i < bnad->num_rxp_per_rx; i++)
		rx_info->rx_ctrl[i].bnad = bnad;
}

/* Called with mutex_lock(&bnad->conf_mutex) held */
void
bnad_cleanup_rx(struct bnad *bnad, u32 rx_id)
{
	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
	unsigned long flags;
	int to_del = 0;

	if (!rx_info->rx)
		return;

	if (0 == rx_id) {
		spin_lock_irqsave(&bnad->bna_lock, flags);
		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
		    test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
			clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
			to_del = 1;
		}
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		if (to_del)
			del_timer_sync(&bnad->dim_timer);
	}

	init_completion(&bnad->bnad_completions.rx_comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	wait_for_completion(&bnad->bnad_completions.rx_comp);

	if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
		bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);

	bnad_napi_disable(bnad, rx_id);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_destroy(rx_info->rx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	rx_info->rx = NULL;
	rx_info->rx_id = 0;

	bnad_rx_res_free(bnad, res_info);
}

/* Called with mutex_lock(&bnad->conf_mutex) held */
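/*
 * bnad_setup_rx: builds the Rx object for rx_id - fills the Rx config,
 * allocates memory and interrupt resources, creates the Rx object, sets up
 * NAPI before interrupts can fire, registers per-CQ MSIX handlers when
 * applicable, and enables the Rx object (with DIM and VLAN filtering on
 * the default Rx).
 */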
1906int
078086f3 1907bnad_setup_rx(struct bnad *bnad, u32 rx_id)
8b230ed8
RM
1908{
1909 int err;
1910 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1911 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1912 struct bna_intr_info *intr_info =
1913 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
1914 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1915 struct bna_rx_event_cbfn rx_cbfn;
1916 struct bna_rx *rx;
1917 unsigned long flags;
1918
078086f3
RM
1919 rx_info->rx_id = rx_id;
1920
8b230ed8
RM
1921 /* Initialize the Rx object configuration */
1922 bnad_init_rx_config(bnad, rx_config);
1923
1924 /* Initialize the Rx event handlers */
1925 rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
be7fa326 1926 rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
8b230ed8
RM
1927 rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
1928 rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
1929 rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
1930 rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
1931
1932 /* Get BNA's resource requirement for one Rx object */
1933 spin_lock_irqsave(&bnad->bna_lock, flags);
1934 bna_rx_res_req(rx_config, res_info);
1935 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1936
1937 /* Fill Unmap Q memory requirements */
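	/*
	 * Non-single RXP types carry two RxQs per path (see
	 * bnad_rxqs_per_cq), hence the doubled unmap queue count below.
	 */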
1938 BNAD_FILL_UNMAPQ_MEM_REQ(
1939 &res_info[BNA_RX_RES_MEM_T_UNMAPQ],
1940 rx_config->num_paths +
1941 ((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 :
1942 rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH);
1943
1944 /* Allocate resource */
1945 err = bnad_rx_res_alloc(bnad, res_info, rx_id);
1946 if (err)
1947 return err;
1948
2be67144
RM
1949 bnad_rx_ctrl_init(bnad, rx_id);
1950
8b230ed8
RM
1951 /* Ask BNA to create one Rx object, supplying required resources */
1952 spin_lock_irqsave(&bnad->bna_lock, flags);
1953 rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
1954 rx_info);
1955 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3caa1e95
RM
1956 if (!rx) {
1957 err = -ENOMEM;
8b230ed8 1958 goto err_return;
3caa1e95 1959 }
8b230ed8
RM
1960 rx_info->rx = rx;
1961
2be67144
RM
1962 /*
1963 * Init NAPI, so that state is set to NAPI_STATE_SCHED,
1964 * so that IRQ handler cannot schedule NAPI at this point.
1965 */
1966 bnad_napi_init(bnad, rx_id);
1967
8b230ed8
RM
1968 /* Register ISR for the Rx object */
1969 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1970 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
1971 rx_config->num_paths);
1972 if (err)
1973 goto err_return;
1974 }
1975
8b230ed8
RM
1976 spin_lock_irqsave(&bnad->bna_lock, flags);
1977 if (0 == rx_id) {
1978 /* Set up Dynamic Interrupt Moderation Vector */
1979 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
1980 bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
1981
1982 /* Enable VLAN filtering only on the default Rx */
1983 bna_rx_vlanfilter_enable(rx);
1984
1985 /* Start the DIM timer */
1986 bnad_dim_timer_start(bnad);
1987 }
1988
1989 bna_rx_enable(rx);
1990 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1991
2be67144
RM
1992 /* Enable scheduling of NAPI */
1993 bnad_napi_enable(bnad, rx_id);
1994
8b230ed8
RM
1995 return 0;
1996
1997err_return:
1998 bnad_cleanup_rx(bnad, rx_id);
1999 return err;
2000}
2001
2002/* Called with conf_lock & bnad->bna_lock held */
2003void
2004bnad_tx_coalescing_timeo_set(struct bnad *bnad)
2005{
2006 struct bnad_tx_info *tx_info;
2007
2008 tx_info = &bnad->tx_info[0];
2009 if (!tx_info->tx)
2010 return;
2011
2012 bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
2013}
2014
2015/* Called with conf_lock & bnad->bna_lock held */
2016void
2017bnad_rx_coalescing_timeo_set(struct bnad *bnad)
2018{
2019 struct bnad_rx_info *rx_info;
0120b99c 2020 int i;
8b230ed8
RM
2021
2022 for (i = 0; i < bnad->num_rx; i++) {
2023 rx_info = &bnad->rx_info[i];
2024 if (!rx_info->rx)
2025 continue;
2026 bna_rx_coalescing_timeo_set(rx_info->rx,
2027 bnad->rx_coalescing_timeo);
2028 }
2029}
2030
2031/*
2032 * Called with bnad->bna_lock held
2033 */
a2122d95 2034int
8b230ed8
RM
2035bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
2036{
2037 int ret;
2038
2039 if (!is_valid_ether_addr(mac_addr))
2040 return -EADDRNOTAVAIL;
2041
2042 /* If datapath is down, pretend everything went through */
2043 if (!bnad->rx_info[0].rx)
2044 return 0;
2045
2046 ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
2047 if (ret != BNA_CB_SUCCESS)
2048 return -EADDRNOTAVAIL;
2049
2050 return 0;
2051}
2052
2053/* Should be called with conf_lock held */
a2122d95 2054int
8b230ed8
RM
2055bnad_enable_default_bcast(struct bnad *bnad)
2056{
2057 struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2058 int ret;
2059 unsigned long flags;
2060
2061 init_completion(&bnad->bnad_completions.mcast_comp);
2062
2063 spin_lock_irqsave(&bnad->bna_lock, flags);
2064 ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
2065 bnad_cb_rx_mcast_add);
2066 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2067
2068 if (ret == BNA_CB_SUCCESS)
2069 wait_for_completion(&bnad->bnad_completions.mcast_comp);
2070 else
2071 return -ENODEV;
2072
2073 if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2074 return -ENODEV;
2075
2076 return 0;
2077}
2078
19dbff9f 2079/* Called with mutex_lock(&bnad->conf_mutex) held */
a2122d95 2080void
aad75b66
RM
2081bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2082{
f859d7cb 2083 u16 vid;
aad75b66
RM
2084 unsigned long flags;
2085
f859d7cb 2086 for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
aad75b66 2087 spin_lock_irqsave(&bnad->bna_lock, flags);
f859d7cb 2088 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
aad75b66
RM
2089 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2090 }
2091}
2092
8b230ed8
RM
2093/* Statistics utilities */
2094void
250e061e 2095bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
8b230ed8 2096{
8b230ed8
RM
2097 int i, j;
2098
2099 for (i = 0; i < bnad->num_rx; i++) {
2100 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2101 if (bnad->rx_info[i].rx_ctrl[j].ccb) {
250e061e 2102 stats->rx_packets += bnad->rx_info[i].
8b230ed8 2103 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
250e061e 2104 stats->rx_bytes += bnad->rx_info[i].
8b230ed8
RM
2105 rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2106 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2107 bnad->rx_info[i].rx_ctrl[j].ccb->
2108 rcb[1]->rxq) {
250e061e 2109 stats->rx_packets +=
8b230ed8
RM
2110 bnad->rx_info[i].rx_ctrl[j].
2111 ccb->rcb[1]->rxq->rx_packets;
250e061e 2112 stats->rx_bytes +=
8b230ed8
RM
2113 bnad->rx_info[i].rx_ctrl[j].
2114 ccb->rcb[1]->rxq->rx_bytes;
2115 }
2116 }
2117 }
2118 }
2119 for (i = 0; i < bnad->num_tx; i++) {
2120 for (j = 0; j < bnad->num_txq_per_tx; j++) {
2121 if (bnad->tx_info[i].tcb[j]) {
250e061e 2122 stats->tx_packets +=
8b230ed8 2123 bnad->tx_info[i].tcb[j]->txq->tx_packets;
250e061e 2124 stats->tx_bytes +=
8b230ed8
RM
2125 bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2126 }
2127 }
2128 }
2129}
2130
2131/*
2132 * Must be called with the bna_lock held.
2133 */
2134void
250e061e 2135bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
8b230ed8 2136{
078086f3
RM
2137 struct bfi_enet_stats_mac *mac_stats;
2138 u32 bmap;
8b230ed8
RM
2139 int i;
2140
078086f3 2141 mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
250e061e 2142 stats->rx_errors =
8b230ed8
RM
2143 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2144 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2145 mac_stats->rx_undersize;
250e061e 2146 stats->tx_errors = mac_stats->tx_fcs_error +
8b230ed8 2147 mac_stats->tx_undersize;
250e061e
ED
2148 stats->rx_dropped = mac_stats->rx_drop;
2149 stats->tx_dropped = mac_stats->tx_drop;
2150 stats->multicast = mac_stats->rx_multicast;
2151 stats->collisions = mac_stats->tx_total_collision;
8b230ed8 2152
250e061e 2153 stats->rx_length_errors = mac_stats->rx_frame_length_error;
8b230ed8
RM
2154
2155 /* receive ring buffer overflow ?? */
2156
250e061e
ED
2157 stats->rx_crc_errors = mac_stats->rx_fcs_error;
2158 stats->rx_frame_errors = mac_stats->rx_alignment_error;
8b230ed8 2159	/* receiver FIFO overrun: report frame drops from the first active RxF */
078086f3
RM
2160 bmap = bna_rx_rid_mask(&bnad->bna);
2161 for (i = 0; bmap; i++) {
8b230ed8 2162 if (bmap & 1) {
250e061e 2163 stats->rx_fifo_errors +=
8b230ed8 2164 bnad->stats.bna_stats->
078086f3 2165 hw_stats.rxf_stats[i].frame_drops;
8b230ed8
RM
2166 break;
2167 }
2168 bmap >>= 1;
2169 }
2170}
2171
2172static void
2173bnad_mbox_irq_sync(struct bnad *bnad)
2174{
2175 u32 irq;
2176 unsigned long flags;
2177
2178 spin_lock_irqsave(&bnad->bna_lock, flags);
2179 if (bnad->cfg_flags & BNAD_CF_MSIX)
8811e267 2180 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
8b230ed8
RM
2181 else
2182 irq = bnad->pcidev->irq;
2183 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2184
2185 synchronize_irq(irq);
2186}
2187
2188/* Utility used by bnad_start_xmit, for doing TSO */
2189static int
2190bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2191{
2192 int err;
2193
8b230ed8
RM
2194 if (skb_header_cloned(skb)) {
2195 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2196 if (err) {
2197 BNAD_UPDATE_CTR(bnad, tso_err);
2198 return err;
2199 }
2200 }
2201
2202 /*
2203 * For TSO, the TCP checksum field is seeded with pseudo-header sum
2204 * excluding the length field.
2205 */
2206 if (skb->protocol == htons(ETH_P_IP)) {
2207 struct iphdr *iph = ip_hdr(skb);
2208
2209 /* Do we really need these? */
2210 iph->tot_len = 0;
2211 iph->check = 0;
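		/*
		 * tot_len and the IP checksum are regenerated per segment
		 * by the LSO engine, so clear any stale header values.
		 */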
2212
2213 tcp_hdr(skb)->check =
2214 ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2215 IPPROTO_TCP, 0);
2216 BNAD_UPDATE_CTR(bnad, tso4);
2217 } else {
2218 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2219
8b230ed8
RM
2220 ipv6h->payload_len = 0;
2221 tcp_hdr(skb)->check =
2222 ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2223 IPPROTO_TCP, 0);
2224 BNAD_UPDATE_CTR(bnad, tso6);
2225 }
2226
2227 return 0;
2228}
2229
2230/*
2231 * Initialize Q numbers depending on Rx Paths
2232 * Called with bnad->bna_lock held, because of cfg_flags
2233 * access.
2234 */
2235static void
2236bnad_q_num_init(struct bnad *bnad)
2237{
2238 int rxps;
2239
2240 rxps = min((uint)num_online_cpus(),
772b5235 2241 (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
8b230ed8
RM
2242
2243 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2244 rxps = 1; /* INTx */
2245
2246 bnad->num_rx = 1;
2247 bnad->num_tx = 1;
2248 bnad->num_rxp_per_rx = rxps;
2249 bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2250}
2251
2252/*
2253 * Adjust the Q numbers, given the number of MSI-X vectors.
2254 * Preference is given to RSS over Tx priority queues; in that
2255 * case only one TxQ is used.
2256 * Called with bnad->bna_lock held because of cfg_flags access.
2257 */
2258static void
078086f3 2259bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
8b230ed8
RM
2260{
2261 bnad->num_txq_per_tx = 1;
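	/*
	 * Give whatever is left after the TxQ and mailbox vectors to the
	 * Rx paths; fall back to a single Rx path otherwise.
	 */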
2262 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
2263 bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2264 (bnad->cfg_flags & BNAD_CF_MSIX)) {
2265 bnad->num_rxp_per_rx = msix_vectors -
2266 (bnad->num_tx * bnad->num_txq_per_tx) -
2267 BNAD_MAILBOX_MSIX_VECTORS;
2268 } else
2269 bnad->num_rxp_per_rx = 1;
2270}
2271
078086f3
RM
2272/* Enable / disable ioceth */
2273static int
2274bnad_ioceth_disable(struct bnad *bnad)
8b230ed8
RM
2275{
2276 unsigned long flags;
078086f3 2277 int err = 0;
8b230ed8
RM
2278
2279 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3
RM
2280 init_completion(&bnad->bnad_completions.ioc_comp);
2281 bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
8b230ed8
RM
2282 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2283
078086f3
RM
2284 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2285 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2286
2287 err = bnad->bnad_completions.ioc_comp_status;
2288 return err;
8b230ed8
RM
2289}
2290
2291static int
078086f3 2292bnad_ioceth_enable(struct bnad *bnad)
8b230ed8
RM
2293{
2294 int err = 0;
2295 unsigned long flags;
2296
8b230ed8 2297 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3
RM
2298 init_completion(&bnad->bnad_completions.ioc_comp);
2299 bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
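	/* A timeout leaves ioc_comp_status at BNA_CB_WAITING, returned below as the error */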
2300 bna_ioceth_enable(&bnad->bna.ioceth);
8b230ed8
RM
2301 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2302
078086f3
RM
2303 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2304 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
8b230ed8 2305
078086f3 2306 err = bnad->bnad_completions.ioc_comp_status;
8b230ed8
RM
2307
2308 return err;
2309}
2310
2311/* Free BNA resources */
2312static void
078086f3
RM
2313bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2314 u32 res_val_max)
8b230ed8
RM
2315{
2316 int i;
8b230ed8 2317
078086f3
RM
2318 for (i = 0; i < res_val_max; i++)
2319 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
8b230ed8
RM
2320}
2321
2322/* Allocates memory and interrupt resources for BNA */
2323static int
078086f3
RM
2324bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2325 u32 res_val_max)
8b230ed8
RM
2326{
2327 int i, err;
8b230ed8 2328
078086f3
RM
2329 for (i = 0; i < res_val_max; i++) {
2330 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
8b230ed8
RM
2331 if (err)
2332 goto err_return;
2333 }
2334 return 0;
2335
2336err_return:
078086f3 2337 bnad_res_free(bnad, res_info, res_val_max);
8b230ed8
RM
2338 return err;
2339}
2340
2341/* Interrupt enable / disable */
2342static void
2343bnad_enable_msix(struct bnad *bnad)
2344{
2345 int i, ret;
8b230ed8
RM
2346 unsigned long flags;
2347
2348 spin_lock_irqsave(&bnad->bna_lock, flags);
2349 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2350 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2351 return;
2352 }
2353 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2354
2355 if (bnad->msix_table)
2356 return;
2357
8b230ed8 2358 bnad->msix_table =
b7ee31c5 2359 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
8b230ed8
RM
2360
2361 if (!bnad->msix_table)
2362 goto intx_mode;
2363
b7ee31c5 2364 for (i = 0; i < bnad->msix_num; i++)
8b230ed8
RM
2365 bnad->msix_table[i].entry = i;
2366
b7ee31c5 2367 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
8b230ed8
RM
2368 if (ret > 0) {
2369 /* Not enough MSI-X vectors. */
19dbff9f
RM
2370 pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
2371 ret, bnad->msix_num);
8b230ed8
RM
2372
2373 spin_lock_irqsave(&bnad->bna_lock, flags);
2374 /* ret = #of vectors that we got */
271e8b79
RM
2375 bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2376 (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
8b230ed8
RM
2377 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2378
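	/* Recompute the vector count from the adjusted queue numbers */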
271e8b79 2379 bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
8b230ed8 2380 BNAD_MAILBOX_MSIX_VECTORS;
8b230ed8 2381
078086f3
RM
2382 if (bnad->msix_num > ret)
2383 goto intx_mode;
2384
8b230ed8
RM
2385 /* Try once more with adjusted numbers */
2386 /* If this fails, fall back to INTx */
2387 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
b7ee31c5 2388 bnad->msix_num);
8b230ed8
RM
2389 if (ret)
2390 goto intx_mode;
2391
2392 } else if (ret < 0)
2393 goto intx_mode;
078086f3
RM
2394
2395 pci_intx(bnad->pcidev, 0);
2396
8b230ed8
RM
2397 return;
2398
2399intx_mode:
19dbff9f 2400 pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n");
8b230ed8
RM
2401
2402 kfree(bnad->msix_table);
2403 bnad->msix_table = NULL;
2404 bnad->msix_num = 0;
8b230ed8
RM
2405 spin_lock_irqsave(&bnad->bna_lock, flags);
2406 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2407 bnad_q_num_init(bnad);
2408 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2409}
2410
2411static void
2412bnad_disable_msix(struct bnad *bnad)
2413{
2414 u32 cfg_flags;
2415 unsigned long flags;
2416
2417 spin_lock_irqsave(&bnad->bna_lock, flags);
2418 cfg_flags = bnad->cfg_flags;
2419 if (bnad->cfg_flags & BNAD_CF_MSIX)
2420 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2421 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2422
2423 if (cfg_flags & BNAD_CF_MSIX) {
2424 pci_disable_msix(bnad->pcidev);
2425 kfree(bnad->msix_table);
2426 bnad->msix_table = NULL;
2427 }
2428}
2429
2430/* Netdev entry points */
2431static int
2432bnad_open(struct net_device *netdev)
2433{
2434 int err;
2435 struct bnad *bnad = netdev_priv(netdev);
2436 struct bna_pause_config pause_config;
2437 int mtu;
2438 unsigned long flags;
2439
2440 mutex_lock(&bnad->conf_mutex);
2441
2442 /* Tx */
2443 err = bnad_setup_tx(bnad, 0);
2444 if (err)
2445 goto err_return;
2446
2447 /* Rx */
2448 err = bnad_setup_rx(bnad, 0);
2449 if (err)
2450 goto cleanup_tx;
2451
2452 /* Port */
2453 pause_config.tx_pause = 0;
2454 pause_config.rx_pause = 0;
2455
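	/* Frame size programmed to hardware: L2 header + one VLAN tag + MTU + FCS */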
078086f3 2456 mtu = ETH_HLEN + VLAN_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
8b230ed8
RM
2457
2458 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3
RM
2459 bna_enet_mtu_set(&bnad->bna.enet, mtu, NULL);
2460 bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
2461 bna_enet_enable(&bnad->bna.enet);
8b230ed8
RM
2462 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2463
2464 /* Enable broadcast */
2465 bnad_enable_default_bcast(bnad);
2466
aad75b66
RM
2467 /* Restore VLANs, if any */
2468 bnad_restore_vlans(bnad, 0);
2469
8b230ed8
RM
2470 /* Set the UCAST address */
2471 spin_lock_irqsave(&bnad->bna_lock, flags);
2472 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2473 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2474
2475 /* Start the stats timer */
2476 bnad_stats_timer_start(bnad);
2477
2478 mutex_unlock(&bnad->conf_mutex);
2479
2480 return 0;
2481
2482cleanup_tx:
2483 bnad_cleanup_tx(bnad, 0);
2484
2485err_return:
2486 mutex_unlock(&bnad->conf_mutex);
2487 return err;
2488}
2489
2490static int
2491bnad_stop(struct net_device *netdev)
2492{
2493 struct bnad *bnad = netdev_priv(netdev);
2494 unsigned long flags;
2495
2496 mutex_lock(&bnad->conf_mutex);
2497
2498 /* Stop the stats timer */
2499 bnad_stats_timer_stop(bnad);
2500
078086f3 2501 init_completion(&bnad->bnad_completions.enet_comp);
8b230ed8
RM
2502
2503 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3
RM
2504 bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2505 bnad_cb_enet_disabled);
8b230ed8
RM
2506 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2507
078086f3 2508 wait_for_completion(&bnad->bnad_completions.enet_comp);
8b230ed8
RM
2509
2510 bnad_cleanup_tx(bnad, 0);
2511 bnad_cleanup_rx(bnad, 0);
2512
2513 /* Synchronize mailbox IRQ */
2514 bnad_mbox_irq_sync(bnad);
2515
2516 mutex_unlock(&bnad->conf_mutex);
2517
2518 return 0;
2519}
2520
2521/* TX */
2522/*
2523 * bnad_start_xmit : Netdev entry point for Transmit
2524 * Called under lock held by net_device
2525 */
2526static netdev_tx_t
2527bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2528{
2529 struct bnad *bnad = netdev_priv(netdev);
078086f3
RM
2530 u32 txq_id = 0;
2531 struct bna_tcb *tcb = bnad->tx_info[0].tcb[txq_id];
8b230ed8 2532
0120b99c
RM
2533 u16 txq_prod, vlan_tag = 0;
2534 u32 unmap_prod, wis, wis_used, wi_range;
2535 u32 vectors, vect_id, i, acked;
0120b99c 2536 int err;
271e8b79
RM
2537 unsigned int len;
2538 u32 gso_size;
8b230ed8 2539
078086f3 2540 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
0120b99c 2541 dma_addr_t dma_addr;
8b230ed8 2542 struct bna_txq_entry *txqent;
078086f3 2543 u16 flags;
8b230ed8 2544
271e8b79
RM
2545 if (unlikely(skb->len <= ETH_HLEN)) {
2546 dev_kfree_skb(skb);
2547 BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2548 return NETDEV_TX_OK;
2549 }
2550 if (unlikely(skb_headlen(skb) > BFI_TX_MAX_DATA_PER_VECTOR)) {
8b230ed8 2551 dev_kfree_skb(skb);
271e8b79
RM
2552 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_too_long);
2553 return NETDEV_TX_OK;
2554 }
2555 if (unlikely(skb_headlen(skb) == 0)) {
2556 dev_kfree_skb(skb);
2557 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
8b230ed8
RM
2558 return NETDEV_TX_OK;
2559 }
2560
2561 /*
2562 * Takes care of the Tx that is scheduled between clearing the flag
19dbff9f 2563 * and the netif_tx_stop_all_queues() call.
8b230ed8 2564 */
be7fa326 2565 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
8b230ed8 2566 dev_kfree_skb(skb);
271e8b79 2567 BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
8b230ed8
RM
2568 return NETDEV_TX_OK;
2569 }
2570
8b230ed8 2571 vectors = 1 + skb_shinfo(skb)->nr_frags;
271e8b79 2572 if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
8b230ed8 2573 dev_kfree_skb(skb);
271e8b79 2574 BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
8b230ed8
RM
2575 return NETDEV_TX_OK;
2576 }
2577 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
2578 acked = 0;
078086f3
RM
2579 if (unlikely(wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2580 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
8b230ed8
RM
2581 if ((u16) (*tcb->hw_consumer_index) !=
2582 tcb->consumer_index &&
2583 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2584 acked = bnad_free_txbufs(bnad, tcb);
be7fa326
RM
2585 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2586 bna_ib_ack(tcb->i_dbell, acked);
8b230ed8
RM
2587 smp_mb__before_clear_bit();
2588 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2589 } else {
2590 netif_stop_queue(netdev);
2591 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2592 }
2593
2594 smp_mb();
2595 /*
2596 * Check again to deal with race condition between
2597 * netif_stop_queue here, and netif_wake_queue in
2598 * interrupt handler which is not inside netif tx lock.
2599 */
2600 if (likely
2601 (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2602 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2603 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2604 return NETDEV_TX_BUSY;
2605 } else {
2606 netif_wake_queue(netdev);
2607 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2608 }
2609 }
2610
2611 unmap_prod = unmap_q->producer_index;
8b230ed8
RM
2612 flags = 0;
2613
2614 txq_prod = tcb->producer_index;
2615 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
8b230ed8
RM
2616 txqent->hdr.wi.reserved = 0;
2617 txqent->hdr.wi.num_vectors = vectors;
8b230ed8 2618
eab6d18d 2619 if (vlan_tx_tag_present(skb)) {
8b230ed8
RM
2620 vlan_tag = (u16) vlan_tx_tag_get(skb);
2621 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2622 }
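	/*
	 * With CEE running, force the TxQ priority into the 802.1p PCP
	 * bits (15:13) while preserving the CFI/DEI and VLAN ID bits (12:0).
	 */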
2623 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2624 vlan_tag =
2625 (tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff);
2626 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2627 }
2628
2629 txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2630
2631 if (skb_is_gso(skb)) {
271e8b79
RM
2632 gso_size = skb_shinfo(skb)->gso_size;
2633
2634 if (unlikely(gso_size > netdev->mtu)) {
2635 dev_kfree_skb(skb);
2636 BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
2637 return NETDEV_TX_OK;
2638 }
2639 if (unlikely((gso_size + skb_transport_offset(skb) +
2640 tcp_hdrlen(skb)) >= skb->len)) {
2641 txqent->hdr.wi.opcode =
2642 __constant_htons(BNA_TXQ_WI_SEND);
2643 txqent->hdr.wi.lso_mss = 0;
2644 BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2645 } else {
2646 txqent->hdr.wi.opcode =
2647 __constant_htons(BNA_TXQ_WI_SEND_LSO);
2648 txqent->hdr.wi.lso_mss = htons(gso_size);
2649 }
2650
8b230ed8 2651 err = bnad_tso_prepare(bnad, skb);
271e8b79 2652 if (unlikely(err)) {
8b230ed8 2653 dev_kfree_skb(skb);
271e8b79 2654 BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
8b230ed8
RM
2655 return NETDEV_TX_OK;
2656 }
8b230ed8
RM
2657 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2658 txqent->hdr.wi.l4_hdr_size_n_offset =
2659 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2660 (tcp_hdrlen(skb) >> 2,
2661 skb_transport_offset(skb)));
271e8b79
RM
2662 } else {
2663 txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND);
8b230ed8
RM
2664 txqent->hdr.wi.lso_mss = 0;
2665
271e8b79
RM
2666 if (unlikely(skb->len > (netdev->mtu + ETH_HLEN))) {
2667 dev_kfree_skb(skb);
2668 BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
2669 return NETDEV_TX_OK;
8b230ed8 2670 }
8b230ed8 2671
271e8b79
RM
2672 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2673 u8 proto = 0;
8b230ed8 2674
271e8b79
RM
2675 if (skb->protocol == __constant_htons(ETH_P_IP))
2676 proto = ip_hdr(skb)->protocol;
2677 else if (skb->protocol ==
2678 __constant_htons(ETH_P_IPV6)) {
2679 /* nexthdr may not be TCP immediately. */
2680 proto = ipv6_hdr(skb)->nexthdr;
2681 }
2682 if (proto == IPPROTO_TCP) {
2683 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2684 txqent->hdr.wi.l4_hdr_size_n_offset =
2685 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2686 (0, skb_transport_offset(skb)));
2687
2688 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2689
2690 if (unlikely(skb_headlen(skb) <
2691 skb_transport_offset(skb) + tcp_hdrlen(skb))) {
2692 dev_kfree_skb(skb);
2693 BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
2694 return NETDEV_TX_OK;
2695 }
8b230ed8 2696
271e8b79
RM
2697 } else if (proto == IPPROTO_UDP) {
2698 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2699 txqent->hdr.wi.l4_hdr_size_n_offset =
2700 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2701 (0, skb_transport_offset(skb)));
2702
2703 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2704 if (unlikely(skb_headlen(skb) <
2705 skb_transport_offset(skb) +
2706 sizeof(struct udphdr))) {
2707 dev_kfree_skb(skb);
2708 BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
2709 return NETDEV_TX_OK;
2710 }
2711 } else {
8b230ed8 2712 dev_kfree_skb(skb);
271e8b79 2713 BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
8b230ed8
RM
2714 return NETDEV_TX_OK;
2715 }
271e8b79
RM
2716 } else {
2717 txqent->hdr.wi.l4_hdr_size_n_offset = 0;
8b230ed8 2718 }
8b230ed8
RM
2719 }
2720
2721 txqent->hdr.wi.flags = htons(flags);
2722
2723 txqent->hdr.wi.frame_length = htonl(skb->len);
2724
2725 unmap_q->unmap_array[unmap_prod].skb = skb;
271e8b79
RM
2726 len = skb_headlen(skb);
2727 txqent->vector[0].length = htons(len);
5ea74318
IV
2728 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
2729 skb_headlen(skb), DMA_TO_DEVICE);
2730 dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
8b230ed8
RM
2731 dma_addr);
2732
271e8b79 2733 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
8b230ed8
RM
2734 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2735
271e8b79
RM
2736 vect_id = 0;
2737 wis_used = 1;
2738
8b230ed8
RM
2739 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2740 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
078086f3 2741 u16 size = frag->size;
8b230ed8 2742
271e8b79
RM
2743 if (unlikely(size == 0)) {
2744 unmap_prod = unmap_q->producer_index;
2745
2746 unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
2747 unmap_q->unmap_array,
2748 unmap_prod, unmap_q->q_depth, skb,
2749 i);
2750 dev_kfree_skb(skb);
2751 BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
2752 return NETDEV_TX_OK;
2753 }
2754
2755 len += size;
2756
8b230ed8
RM
2757 if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
2758 vect_id = 0;
2759 if (--wi_range)
2760 txqent++;
2761 else {
2762 BNA_QE_INDX_ADD(txq_prod, wis_used,
2763 tcb->q_depth);
2764 wis_used = 0;
2765 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
2766 txqent, wi_range);
8b230ed8
RM
2767 }
2768 wis_used++;
271e8b79
RM
2769 txqent->hdr.wi_ext.opcode =
2770 __constant_htons(BNA_TXQ_WI_EXTENSION);
8b230ed8
RM
2771 }
2772
2773 BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
2774 txqent->vector[vect_id].length = htons(size);
5ea74318
IV
2775 dma_addr = dma_map_page(&bnad->pcidev->dev, frag->page,
2776 frag->page_offset, size, DMA_TO_DEVICE);
2777 dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
8b230ed8
RM
2778 dma_addr);
2779 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2780 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2781 }
2782
271e8b79
RM
2783 if (unlikely(len != skb->len)) {
2784 unmap_prod = unmap_q->producer_index;
2785
2786 unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
2787 unmap_q->unmap_array, unmap_prod,
2788 unmap_q->q_depth, skb,
2789 skb_shinfo(skb)->nr_frags);
2790 dev_kfree_skb(skb);
2791 BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
2792 return NETDEV_TX_OK;
2793 }
2794
8b230ed8
RM
2795 unmap_q->producer_index = unmap_prod;
2796 BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
2797 tcb->producer_index = txq_prod;
2798
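	/*
	 * Make the producer index updates visible before re-checking the
	 * queue state and ringing the doorbell.
	 */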
2799 smp_mb();
be7fa326
RM
2800
2801 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2802 return NETDEV_TX_OK;
2803
8b230ed8 2804 bna_txq_prod_indx_doorbell(tcb);
271e8b79 2805 smp_mb();
8b230ed8
RM
2806
2807 if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
2808 tasklet_schedule(&bnad->tx_free_tasklet);
2809
2810 return NETDEV_TX_OK;
2811}
2812
2813/*
2814 * The spin lock is used to synchronize reads of the stats structures,
2815 * which are written by BNA under the same lock.
2816 */
250e061e
ED
2817static struct rtnl_link_stats64 *
2818bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
8b230ed8
RM
2819{
2820 struct bnad *bnad = netdev_priv(netdev);
2821 unsigned long flags;
2822
2823 spin_lock_irqsave(&bnad->bna_lock, flags);
2824
250e061e
ED
2825 bnad_netdev_qstats_fill(bnad, stats);
2826 bnad_netdev_hwstats_fill(bnad, stats);
8b230ed8
RM
2827
2828 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2829
250e061e 2830 return stats;
8b230ed8
RM
2831}
2832
a2122d95 2833void
8b230ed8
RM
2834bnad_set_rx_mode(struct net_device *netdev)
2835{
2836 struct bnad *bnad = netdev_priv(netdev);
2837 u32 new_mask, valid_mask;
2838 unsigned long flags;
2839
2840 spin_lock_irqsave(&bnad->bna_lock, flags);
2841
2842 new_mask = valid_mask = 0;
2843
2844 if (netdev->flags & IFF_PROMISC) {
2845 if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
2846 new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2847 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2848 bnad->cfg_flags |= BNAD_CF_PROMISC;
2849 }
2850 } else {
2851 if (bnad->cfg_flags & BNAD_CF_PROMISC) {
2852 new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
2853 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2854 bnad->cfg_flags &= ~BNAD_CF_PROMISC;
2855 }
2856 }
2857
2858 if (netdev->flags & IFF_ALLMULTI) {
2859 if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
2860 new_mask |= BNA_RXMODE_ALLMULTI;
2861 valid_mask |= BNA_RXMODE_ALLMULTI;
2862 bnad->cfg_flags |= BNAD_CF_ALLMULTI;
2863 }
2864 } else {
2865 if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
2866 new_mask &= ~BNA_RXMODE_ALLMULTI;
2867 valid_mask |= BNA_RXMODE_ALLMULTI;
2868 bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
2869 }
2870 }
2871
271e8b79
RM
2872 if (bnad->rx_info[0].rx == NULL)
2873 goto unlock;
2874
8b230ed8
RM
2875 bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
2876
2877 if (!netdev_mc_empty(netdev)) {
2878 u8 *mcaddr_list;
2879 int mc_count = netdev_mc_count(netdev);
2880
2881 /* Index 0 holds the broadcast address */
2882 mcaddr_list =
2883 kzalloc((mc_count + 1) * ETH_ALEN,
2884 GFP_ATOMIC);
2885 if (!mcaddr_list)
ca1cef3a 2886 goto unlock;
8b230ed8
RM
2887
2888 memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
2889
2890 /* Copy rest of the MC addresses */
2891 bnad_netdev_mc_list_get(netdev, mcaddr_list);
2892
2893 bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
2894 mcaddr_list, NULL);
2895
2896 /* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
2897 kfree(mcaddr_list);
2898 }
ca1cef3a 2899unlock:
8b230ed8
RM
2900 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2901}
2902
2903/*
2904 * bna_lock is used to sync writes to netdev->addr
2905 * conf_lock cannot be used since this call may be made
2906 * in a non-blocking context.
2907 */
2908static int
2909bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
2910{
2911 int err;
2912 struct bnad *bnad = netdev_priv(netdev);
2913 struct sockaddr *sa = (struct sockaddr *)mac_addr;
2914 unsigned long flags;
2915
2916 spin_lock_irqsave(&bnad->bna_lock, flags);
2917
2918 err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
2919
2920 if (!err)
2921 memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
2922
2923 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2924
2925 return err;
2926}
2927
2928static int
078086f3 2929bnad_mtu_set(struct bnad *bnad, int mtu)
8b230ed8 2930{
8b230ed8
RM
2931 unsigned long flags;
2932
078086f3
RM
2933 init_completion(&bnad->bnad_completions.mtu_comp);
2934
2935 spin_lock_irqsave(&bnad->bna_lock, flags);
2936 bna_enet_mtu_set(&bnad->bna.enet, mtu, bnad_cb_enet_mtu_set);
2937 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2938
2939 wait_for_completion(&bnad->bnad_completions.mtu_comp);
2940
2941 return bnad->bnad_completions.mtu_comp_status;
2942}
2943
2944static int
2945bnad_change_mtu(struct net_device *netdev, int new_mtu)
2946{
2947 int err, mtu = netdev->mtu;
8b230ed8
RM
2948 struct bnad *bnad = netdev_priv(netdev);
2949
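	/* Reject MTUs below the minimum Ethernet frame size or above the jumbo limit */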
2950 if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
2951 return -EINVAL;
2952
2953 mutex_lock(&bnad->conf_mutex);
2954
2955 netdev->mtu = new_mtu;
2956
078086f3
RM
2957 mtu = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN;
2958 err = bnad_mtu_set(bnad, mtu);
2959 if (err)
2960 err = -EBUSY;
8b230ed8
RM
2961
2962 mutex_unlock(&bnad->conf_mutex);
2963 return err;
2964}
2965
8b230ed8
RM
2966static void
2967bnad_vlan_rx_add_vid(struct net_device *netdev,
2968 unsigned short vid)
2969{
2970 struct bnad *bnad = netdev_priv(netdev);
2971 unsigned long flags;
2972
2973 if (!bnad->rx_info[0].rx)
2974 return;
2975
2976 mutex_lock(&bnad->conf_mutex);
2977
2978 spin_lock_irqsave(&bnad->bna_lock, flags);
2979 bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
f859d7cb 2980 set_bit(vid, bnad->active_vlans);
8b230ed8
RM
2981 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2982
2983 mutex_unlock(&bnad->conf_mutex);
2984}
2985
2986static void
2987bnad_vlan_rx_kill_vid(struct net_device *netdev,
2988 unsigned short vid)
2989{
2990 struct bnad *bnad = netdev_priv(netdev);
2991 unsigned long flags;
2992
2993 if (!bnad->rx_info[0].rx)
2994 return;
2995
2996 mutex_lock(&bnad->conf_mutex);
2997
2998 spin_lock_irqsave(&bnad->bna_lock, flags);
f859d7cb 2999 clear_bit(vid, bnad->active_vlans);
8b230ed8
RM
3000 bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
3001 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3002
3003 mutex_unlock(&bnad->conf_mutex);
3004}
3005
3006#ifdef CONFIG_NET_POLL_CONTROLLER
3007static void
3008bnad_netpoll(struct net_device *netdev)
3009{
3010 struct bnad *bnad = netdev_priv(netdev);
3011 struct bnad_rx_info *rx_info;
3012 struct bnad_rx_ctrl *rx_ctrl;
3013 u32 curr_mask;
3014 int i, j;
3015
3016 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
3017 bna_intx_disable(&bnad->bna, curr_mask);
3018 bnad_isr(bnad->pcidev->irq, netdev);
3019 bna_intx_enable(&bnad->bna, curr_mask);
3020 } else {
19dbff9f
RM
3021 /*
3022 * Tx processing may happen in sending context, so no need
3023 * to explicitly process completions here
3024 */
3025
3026 /* Rx processing */
8b230ed8
RM
3027 for (i = 0; i < bnad->num_rx; i++) {
3028 rx_info = &bnad->rx_info[i];
3029 if (!rx_info->rx)
3030 continue;
3031 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
3032 rx_ctrl = &rx_info->rx_ctrl[j];
271e8b79 3033 if (rx_ctrl->ccb)
8b230ed8
RM
3034 bnad_netif_rx_schedule_poll(bnad,
3035 rx_ctrl->ccb);
8b230ed8
RM
3036 }
3037 }
3038 }
3039}
3040#endif
3041
3042static const struct net_device_ops bnad_netdev_ops = {
3043 .ndo_open = bnad_open,
3044 .ndo_stop = bnad_stop,
3045 .ndo_start_xmit = bnad_start_xmit,
250e061e 3046 .ndo_get_stats64 = bnad_get_stats64,
8b230ed8 3047 .ndo_set_rx_mode = bnad_set_rx_mode,
8b230ed8
RM
3048 .ndo_validate_addr = eth_validate_addr,
3049 .ndo_set_mac_address = bnad_set_mac_address,
3050 .ndo_change_mtu = bnad_change_mtu,
8b230ed8
RM
3051 .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
3052 .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
3053#ifdef CONFIG_NET_POLL_CONTROLLER
3054 .ndo_poll_controller = bnad_netpoll
3055#endif
3056};
3057
3058static void
3059bnad_netdev_init(struct bnad *bnad, bool using_dac)
3060{
3061 struct net_device *netdev = bnad->netdev;
3062
e5ee20e7
MM
3063 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3064 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3065 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX;
8b230ed8 3066
e5ee20e7
MM
3067 netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
3068 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3069 NETIF_F_TSO | NETIF_F_TSO6;
8b230ed8 3070
e5ee20e7
MM
3071 netdev->features |= netdev->hw_features |
3072 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
8b230ed8
RM
3073
3074 if (using_dac)
3075 netdev->features |= NETIF_F_HIGHDMA;
3076
8b230ed8
RM
3077 netdev->mem_start = bnad->mmio_start;
3078 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
3079
3080 netdev->netdev_ops = &bnad_netdev_ops;
3081 bnad_set_ethtool_ops(netdev);
3082}
3083
3084/*
3085 * 1. Initialize the bnad structure
3086 * 2. Setup netdev pointer in pci_dev
3087 * 3. Initialize Tx free tasklet
3088 * 4. Initialize no. of TxQ & CQs & MSIX vectors
3089 */
3090static int
3091bnad_init(struct bnad *bnad,
3092 struct pci_dev *pdev, struct net_device *netdev)
3093{
3094 unsigned long flags;
3095
3096 SET_NETDEV_DEV(netdev, &pdev->dev);
3097 pci_set_drvdata(pdev, netdev);
3098
3099 bnad->netdev = netdev;
3100 bnad->pcidev = pdev;
3101 bnad->mmio_start = pci_resource_start(pdev, 0);
3102 bnad->mmio_len = pci_resource_len(pdev, 0);
3103 bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
3104 if (!bnad->bar0) {
3105 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
3106 pci_set_drvdata(pdev, NULL);
3107 return -ENOMEM;
3108 }
3109 pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
3110 (unsigned long long) bnad->mmio_len);
3111
3112 spin_lock_irqsave(&bnad->bna_lock, flags);
3113 if (!bnad_msix_disable)
3114 bnad->cfg_flags = BNAD_CF_MSIX;
3115
3116 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
3117
3118 bnad_q_num_init(bnad);
3119 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3120
3121 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
3122 (bnad->num_rx * bnad->num_rxp_per_rx) +
3123 BNAD_MAILBOX_MSIX_VECTORS;
8b230ed8
RM
3124
3125 bnad->txq_depth = BNAD_TXQ_DEPTH;
3126 bnad->rxq_depth = BNAD_RXQ_DEPTH;
8b230ed8
RM
3127
3128 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3129 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3130
3131 tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
3132 (unsigned long)bnad);
3133
3134 return 0;
3135}
3136
3137/*
3138 * Must be called after bnad_pci_uninit()
3139 * so that iounmap() and pci_set_drvdata(NULL)
3140 * happen only after PCI uninitialization.
3141 */
3142static void
3143bnad_uninit(struct bnad *bnad)
3144{
3145 if (bnad->bar0)
3146 iounmap(bnad->bar0);
3147 pci_set_drvdata(bnad->pcidev, NULL);
3148}
3149
3150/*
3151 * Initialize locks
078086f3 3152 a) Per-ioceth mutex used for serializing configuration
8b230ed8
RM
3153 changes from OS interface
3154 b) spin lock used to protect bna state machine
3155 */
3156static void
3157bnad_lock_init(struct bnad *bnad)
3158{
3159 spin_lock_init(&bnad->bna_lock);
3160 mutex_init(&bnad->conf_mutex);
3161}
3162
3163static void
3164bnad_lock_uninit(struct bnad *bnad)
3165{
3166 mutex_destroy(&bnad->conf_mutex);
3167}
3168
3169/* PCI Initialization */
3170static int
3171bnad_pci_init(struct bnad *bnad,
3172 struct pci_dev *pdev, bool *using_dac)
3173{
3174 int err;
3175
3176 err = pci_enable_device(pdev);
3177 if (err)
3178 return err;
3179 err = pci_request_regions(pdev, BNAD_NAME);
3180 if (err)
3181 goto disable_device;
5ea74318
IV
3182 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
3183 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
8b230ed8
RM
3184 *using_dac = 1;
3185 } else {
5ea74318 3186 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
8b230ed8 3187 if (err) {
5ea74318
IV
3188 err = dma_set_coherent_mask(&pdev->dev,
3189 DMA_BIT_MASK(32));
8b230ed8
RM
3190 if (err)
3191 goto release_regions;
3192 }
3193 *using_dac = 0;
3194 }
3195 pci_set_master(pdev);
3196 return 0;
3197
3198release_regions:
3199 pci_release_regions(pdev);
3200disable_device:
3201 pci_disable_device(pdev);
3202
3203 return err;
3204}
3205
3206static void
3207bnad_pci_uninit(struct pci_dev *pdev)
3208{
3209 pci_release_regions(pdev);
3210 pci_disable_device(pdev);
3211}
3212
3213static int __devinit
3214bnad_pci_probe(struct pci_dev *pdev,
3215 const struct pci_device_id *pcidev_id)
3216{
3caa1e95 3217 bool using_dac;
0120b99c 3218 int err;
8b230ed8
RM
3219 struct bnad *bnad;
3220 struct bna *bna;
3221 struct net_device *netdev;
3222 struct bfa_pcidev pcidev_info;
3223 unsigned long flags;
3224
3225 pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
3226 pdev, pcidev_id, PCI_FUNC(pdev->devfn));
3227
3228 mutex_lock(&bnad_fwimg_mutex);
3229 if (!cna_get_firmware_buf(pdev)) {
3230 mutex_unlock(&bnad_fwimg_mutex);
3231 pr_warn("Failed to load Firmware Image!\n");
3232 return -ENODEV;
3233 }
3234 mutex_unlock(&bnad_fwimg_mutex);
3235
3236 /*
3237 * Allocates sizeof(struct net_device + struct bnad)
3238 * bnad = netdev->priv
3239 */
3240 netdev = alloc_etherdev(sizeof(struct bnad));
3241 if (!netdev) {
078086f3 3242 dev_err(&pdev->dev, "netdev allocation failed\n");
8b230ed8
RM
3243 err = -ENOMEM;
3244 return err;
3245 }
3246 bnad = netdev_priv(netdev);
3247
078086f3
RM
3248 bnad_lock_init(bnad);
3249
3250 mutex_lock(&bnad->conf_mutex);
8b230ed8
RM
3251 /*
3252 * PCI initialization
0120b99c 3253 * Output : using_dac = 1 for 64 bit DMA
be7fa326 3254 *                      = 0 for 32 bit DMA
8b230ed8
RM
3255 */
3256 err = bnad_pci_init(bnad, pdev, &using_dac);
3257 if (err)
44861f44 3258 goto unlock_mutex;
8b230ed8 3259
8b230ed8
RM
3260 /*
3261 * Initialize bnad structure
3262 * Setup relation between pci_dev & netdev
3263 * Init Tx free tasklet
3264 */
3265 err = bnad_init(bnad, pdev, netdev);
3266 if (err)
3267 goto pci_uninit;
078086f3 3268
8b230ed8
RM
3269 /* Initialize netdev structure, set up ethtool ops */
3270 bnad_netdev_init(bnad, using_dac);
3271
815f41e7
RM
3272 /* Set link to down state */
3273 netif_carrier_off(netdev);
3274
8b230ed8 3275	/* Get resource requirement from bna */
078086f3 3276 spin_lock_irqsave(&bnad->bna_lock, flags);
8b230ed8 3277 bna_res_req(&bnad->res_info[0]);
078086f3 3278 spin_unlock_irqrestore(&bnad->bna_lock, flags);
8b230ed8
RM
3279
3280 /* Allocate resources from bna */
078086f3 3281 err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
8b230ed8 3282 if (err)
078086f3 3283 goto drv_uninit;
8b230ed8
RM
3284
3285 bna = &bnad->bna;
3286
3287 /* Setup pcidev_info for bna_init() */
3288 pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3289 pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3290 pcidev_info.device_id = bnad->pcidev->device;
3291 pcidev_info.pci_bar_kva = bnad->bar0;
3292
8b230ed8
RM
3293 spin_lock_irqsave(&bnad->bna_lock, flags);
3294 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
8b230ed8
RM
3295 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3296
3297 bnad->stats.bna_stats = &bna->stats;
3298
078086f3
RM
3299 bnad_enable_msix(bnad);
3300 err = bnad_mbox_irq_alloc(bnad);
3301 if (err)
3302 goto res_free;
3303
3304
8b230ed8 3305 /* Set up timers */
078086f3 3306 setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
8b230ed8 3307 ((unsigned long)bnad));
078086f3 3308 setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
8b230ed8 3309 ((unsigned long)bnad));
078086f3 3310 setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
1d32f769 3311 ((unsigned long)bnad));
078086f3 3312 setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
8b230ed8
RM
3313 ((unsigned long)bnad));
3314
3315 /* Now start the timer before calling IOC */
078086f3 3316 mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
8b230ed8
RM
3317 jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
3318
3319 /*
3320 * Start the chip
078086f3
RM
3321 * If the callback comes back with an error, we bail out.
3322 * This is a catastrophic error.
8b230ed8 3323 */
078086f3
RM
3324 err = bnad_ioceth_enable(bnad);
3325 if (err) {
3326 pr_err("BNA: Initialization failed err=%d\n",
3327 err);
3328 goto probe_success;
3329 }
3330
3331 spin_lock_irqsave(&bnad->bna_lock, flags);
3332 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3333 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
3334 bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
3335 bna_attr(bna)->num_rxp - 1);
3336 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3337 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
3338 err = -EIO;
3339 }
3caa1e95
RM
3340 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3341 if (err)
3342 goto disable_ioceth;
3343
3344 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3
RM
3345 bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
3346 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3347
3348 err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
0caa9aae
RM
3349 if (err) {
3350 err = -EIO;
078086f3 3351 goto disable_ioceth;
0caa9aae 3352 }
078086f3
RM
3353
3354 spin_lock_irqsave(&bnad->bna_lock, flags);
3355 bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
3356 spin_unlock_irqrestore(&bnad->bna_lock, flags);
8b230ed8
RM
3357
3358 /* Get the burnt-in mac */
3359 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3 3360 bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr);
8b230ed8
RM
3361 bnad_set_netdev_perm_addr(bnad);
3362 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3363
0caa9aae
RM
3364 mutex_unlock(&bnad->conf_mutex);
3365
8b230ed8
RM
3366	/* Finally, register with the net_device layer */
3367 err = register_netdev(netdev);
3368 if (err) {
3369 pr_err("BNA : Registering with netdev failed\n");
078086f3 3370 goto probe_uninit;
8b230ed8 3371 }
078086f3 3372 set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
8b230ed8 3373
0caa9aae
RM
3374 return 0;
3375
078086f3
RM
3376probe_success:
3377 mutex_unlock(&bnad->conf_mutex);
8b230ed8
RM
3378 return 0;
3379
078086f3
RM
3380probe_uninit:
3381 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3382disable_ioceth:
3383 bnad_ioceth_disable(bnad);
3384 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3385 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3386 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
8b230ed8
RM
3387 spin_lock_irqsave(&bnad->bna_lock, flags);
3388 bna_uninit(bna);
3389 spin_unlock_irqrestore(&bnad->bna_lock, flags);
078086f3 3390 bnad_mbox_irq_free(bnad);
8b230ed8 3391 bnad_disable_msix(bnad);
078086f3
RM
3392res_free:
3393 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3394drv_uninit:
3395 bnad_uninit(bnad);
8b230ed8
RM
3396pci_uninit:
3397 bnad_pci_uninit(pdev);
44861f44 3398unlock_mutex:
078086f3 3399 mutex_unlock(&bnad->conf_mutex);
8b230ed8 3400 bnad_lock_uninit(bnad);
8b230ed8
RM
3401 free_netdev(netdev);
3402 return err;
3403}
3404
3405static void __devexit
3406bnad_pci_remove(struct pci_dev *pdev)
3407{
3408 struct net_device *netdev = pci_get_drvdata(pdev);
3409 struct bnad *bnad;
3410 struct bna *bna;
3411 unsigned long flags;
3412
3413 if (!netdev)
3414 return;
3415
3416 pr_info("%s bnad_pci_remove\n", netdev->name);
3417 bnad = netdev_priv(netdev);
3418 bna = &bnad->bna;
3419
078086f3
RM
3420 if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
3421 unregister_netdev(netdev);
8b230ed8
RM
3422
3423 mutex_lock(&bnad->conf_mutex);
078086f3
RM
3424 bnad_ioceth_disable(bnad);
3425 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3426 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3427 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
8b230ed8
RM
3428 spin_lock_irqsave(&bnad->bna_lock, flags);
3429 bna_uninit(bna);
3430 spin_unlock_irqrestore(&bnad->bna_lock, flags);
8b230ed8 3431
078086f3
RM
3432 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3433 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3434 bnad_mbox_irq_free(bnad);
8b230ed8
RM
3435 bnad_disable_msix(bnad);
3436 bnad_pci_uninit(pdev);
078086f3 3437 mutex_unlock(&bnad->conf_mutex);
8b230ed8
RM
3438 bnad_lock_uninit(bnad);
3439 bnad_uninit(bnad);
3440 free_netdev(netdev);
3441}
3442
0120b99c 3443static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = {
8b230ed8
RM
3444 {
3445 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3446 PCI_DEVICE_ID_BROCADE_CT),
3447 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3448 .class_mask = 0xffff00
3449 }, {0, }
3450};
3451
3452MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3453
3454static struct pci_driver bnad_pci_driver = {
3455 .name = BNAD_NAME,
3456 .id_table = bnad_pci_id_table,
3457 .probe = bnad_pci_probe,
3458 .remove = __devexit_p(bnad_pci_remove),
3459};
3460
3461static int __init
3462bnad_module_init(void)
3463{
3464 int err;
3465
5aad0011
RM
3466 pr_info("Brocade 10G Ethernet driver - version: %s\n",
3467 BNAD_VERSION);
8b230ed8 3468
8a891429 3469 bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
8b230ed8
RM
3470
3471 err = pci_register_driver(&bnad_pci_driver);
3472 if (err < 0) {
3473 pr_err("bna : PCI registration failed in module init "
3474 "(%d)\n", err);
3475 return err;
3476 }
3477
3478 return 0;
3479}
3480
3481static void __exit
3482bnad_module_exit(void)
3483{
3484 pci_unregister_driver(&bnad_pci_driver);
3485
3486 if (bfi_fw)
3487 release_firmware(bfi_fw);
3488}
3489
3490module_init(bnad_module_init);
3491module_exit(bnad_module_exit);
3492
3493MODULE_AUTHOR("Brocade");
3494MODULE_LICENSE("GPL");
3495MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
3496MODULE_VERSION(BNAD_VERSION);
3497MODULE_FIRMWARE(CNA_FW_FILE_CT);