drivers/net/bna/bnad.c
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18#include <linux/netdevice.h>
19#include <linux/skbuff.h>
20#include <linux/etherdevice.h>
21#include <linux/in.h>
22#include <linux/ethtool.h>
23#include <linux/if_vlan.h>
24#include <linux/if_ether.h>
25#include <linux/ip.h>
26
27#include "bnad.h"
28#include "bna.h"
29#include "cna.h"
30
31static DEFINE_MUTEX(bnad_fwimg_mutex);
32
33/*
34 * Module params
35 */
36static uint bnad_msix_disable;
37module_param(bnad_msix_disable, uint, 0444);
38MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
39
40static uint bnad_ioc_auto_recover = 1;
41module_param(bnad_ioc_auto_recover, uint, 0444);
42MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
43
44/*
45 * Global variables
46 */
47u32 bnad_rxqs_per_cq = 2;
48
49static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
50
51/*
52 * Local MACROS
53 */
54#define BNAD_TX_UNMAPQ_DEPTH (bnad->txq_depth * 2)
55
56#define BNAD_RX_UNMAPQ_DEPTH (bnad->rxq_depth)
57
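/*
 * The mailbox interrupt uses the last MSI-X vector when MSI-X is
 * enabled, otherwise the PCI INTx line.
 */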
58#define BNAD_GET_MBOX_IRQ(_bnad) \
59 (((_bnad)->cfg_flags & BNAD_CF_MSIX) ? \
60 ((_bnad)->msix_table[(_bnad)->msix_num - 1].vector) : \
61 ((_bnad)->pcidev->irq))
62
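/*
 * Fill a res_info entry with the KVA memory needed for unmap queues:
 * one bnad_unmap_q header plus (depth - 1) additional bnad_skb_unmap
 * slots per queue.
 */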
63#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth) \
64do { \
65 (_res_info)->res_type = BNA_RES_T_MEM; \
66 (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA; \
67 (_res_info)->res_u.mem_info.num = (_num); \
68 (_res_info)->res_u.mem_info.len = \
69 sizeof(struct bnad_unmap_q) + \
70 (sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \
71} while (0)
72
73#define BNAD_TXRX_SYNC_MDELAY 250 /* 250 msecs */
74
75/*
76 * Reinitialize completions in CQ, once Rx is taken down
77 */
78static void
79bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
80{
81 struct bna_cq_entry *cmpl, *next_cmpl;
82 unsigned int wi_range, wis = 0, ccb_prod = 0;
83 int i;
84
85 BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
86 wi_range);
87
88 for (i = 0; i < ccb->q_depth; i++) {
89 wis++;
90 if (likely(--wi_range))
91 next_cmpl = cmpl + 1;
92 else {
93 BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
94 wis = 0;
95 BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
96 next_cmpl, wi_range);
97 }
98 cmpl->valid = 0;
99 cmpl = next_cmpl;
100 }
101}
102
103/*
104 * Frees all pending Tx Bufs
105 * At this point no activity is expected on the Q,
106 * so DMA unmap & freeing is fine.
107 */
108static void
109bnad_free_all_txbufs(struct bnad *bnad,
110 struct bna_tcb *tcb)
111{
112 u16 unmap_cons;
113 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
114 struct bnad_skb_unmap *unmap_array;
115 struct sk_buff *skb = NULL;
116 int i;
117
118 unmap_array = unmap_q->unmap_array;
119
120 unmap_cons = 0;
121 while (unmap_cons < unmap_q->q_depth) {
122 skb = unmap_array[unmap_cons].skb;
123 if (!skb) {
124 unmap_cons++;
125 continue;
126 }
127 unmap_array[unmap_cons].skb = NULL;
128
129 pci_unmap_single(bnad->pcidev,
130 pci_unmap_addr(&unmap_array[unmap_cons],
131 dma_addr), skb_headlen(skb),
132 PCI_DMA_TODEVICE);
133
134 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
135 if (++unmap_cons >= unmap_q->q_depth)
136 break;
137
138 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
139 pci_unmap_page(bnad->pcidev,
140 pci_unmap_addr(&unmap_array[unmap_cons],
141 dma_addr),
142 skb_shinfo(skb)->frags[i].size,
143 PCI_DMA_TODEVICE);
144 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
145 0);
146 if (++unmap_cons >= unmap_q->q_depth)
147 break;
148 }
149 dev_kfree_skb_any(skb);
150 }
151}
152
153/* Data Path Handlers */
154
155/*
156 * bnad_free_txbufs : Frees the Tx bufs on Tx completion
157 * Can be called in a) Interrupt context
158 * b) Sending context
159 * c) Tasklet context
160 */
161static u32
162bnad_free_txbufs(struct bnad *bnad,
163 struct bna_tcb *tcb)
164{
165 u32 sent_packets = 0, sent_bytes = 0;
166 u16 wis, unmap_cons, updated_hw_cons;
167 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
168 struct bnad_skb_unmap *unmap_array;
169 struct sk_buff *skb;
170 int i;
171
 172 /*
 173 * Just return if TX is stopped. This check is useful
 174 * when bnad_free_txbufs() runs from a tasklet that was scheduled
 175 * before bnad_cb_tx_cleanup() cleared the BNAD_TXQ_TX_STARTED bit,
 176 * but that actually executes after the cleanup has been
 177 * done.
 178 */
 179 if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
 180 return 0;
181
182 updated_hw_cons = *(tcb->hw_consumer_index);
183
184 wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
185 updated_hw_cons, tcb->q_depth);
186
187 BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
188
189 unmap_array = unmap_q->unmap_array;
190 unmap_cons = unmap_q->consumer_index;
191
192 prefetch(&unmap_array[unmap_cons + 1]);
193 while (wis) {
194 skb = unmap_array[unmap_cons].skb;
195
196 unmap_array[unmap_cons].skb = NULL;
197
198 sent_packets++;
199 sent_bytes += skb->len;
200 wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
201
202 pci_unmap_single(bnad->pcidev,
203 pci_unmap_addr(&unmap_array[unmap_cons],
204 dma_addr), skb_headlen(skb),
205 PCI_DMA_TODEVICE);
206 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
207 BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
208
209 prefetch(&unmap_array[unmap_cons + 1]);
210 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
211 prefetch(&unmap_array[unmap_cons + 1]);
212
213 pci_unmap_page(bnad->pcidev,
214 pci_unmap_addr(&unmap_array[unmap_cons],
215 dma_addr),
216 skb_shinfo(skb)->frags[i].size,
217 PCI_DMA_TODEVICE);
218 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
219 0);
220 BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
221 }
222 dev_kfree_skb_any(skb);
223 }
224
225 /* Update consumer pointers. */
226 tcb->consumer_index = updated_hw_cons;
227 unmap_q->consumer_index = unmap_cons;
228
229 tcb->txq->tx_packets += sent_packets;
230 tcb->txq->tx_bytes += sent_bytes;
231
232 return sent_packets;
233}
234
235/* Tx Free Tasklet function */
236/* Frees for all the tcb's in all the Tx's */
237/*
238 * Scheduled from sending context, so that
239 * the fat Tx lock is not held for too long
240 * in the sending context.
241 */
242static void
243bnad_tx_free_tasklet(unsigned long bnad_ptr)
244{
245 struct bnad *bnad = (struct bnad *)bnad_ptr;
246 struct bna_tcb *tcb;
247 u32 acked;
248 int i, j;
249
250 for (i = 0; i < bnad->num_tx; i++) {
251 for (j = 0; j < bnad->num_txq_per_tx; j++) {
252 tcb = bnad->tx_info[i].tcb[j];
253 if (!tcb)
254 continue;
255 if (((u16) (*tcb->hw_consumer_index) !=
256 tcb->consumer_index) &&
257 (!test_and_set_bit(BNAD_TXQ_FREE_SENT,
258 &tcb->flags))) {
259 acked = bnad_free_txbufs(bnad, tcb);
260 if (likely(test_bit(BNAD_TXQ_TX_STARTED,
261 &tcb->flags)))
262 bna_ib_ack(tcb->i_dbell, acked);
263 smp_mb__before_clear_bit();
264 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
265 }
266 }
267 }
268}
269
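/*
 * Reclaim completed Tx buffers, wake the netdev queue if it was
 * stopped and enough descriptors have freed up, then ack the IB
 * with the number of completions processed.
 */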
270static u32
271bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
272{
273 struct net_device *netdev = bnad->netdev;
 274 u32 sent = 0;
275
276 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
277 return 0;
278
279 sent = bnad_free_txbufs(bnad, tcb);
280 if (sent) {
281 if (netif_queue_stopped(netdev) &&
282 netif_carrier_ok(netdev) &&
283 BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
284 BNAD_NETIF_WAKE_THRESHOLD) {
285 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
286 netif_wake_queue(netdev);
287 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
288 }
 289 }
290 }
291
292 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
 293 bna_ib_ack(tcb->i_dbell, sent);
294
295 smp_mb__before_clear_bit();
296 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
297
298 return sent;
299}
300
301/* MSIX Tx Completion Handler */
302static irqreturn_t
303bnad_msix_tx(int irq, void *data)
304{
305 struct bna_tcb *tcb = (struct bna_tcb *)data;
306 struct bnad *bnad = tcb->bnad;
307
308 bnad_tx(bnad, tcb);
309
310 return IRQ_HANDLED;
311}
312
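/* Reset the RCB and its unmap queue indices to an empty state */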
313static void
314bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
315{
316 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
317
318 rcb->producer_index = 0;
319 rcb->consumer_index = 0;
320
321 unmap_q->producer_index = 0;
322 unmap_q->consumer_index = 0;
323}
324
325static void
 326bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
327{
328 struct bnad_unmap_q *unmap_q;
329 struct sk_buff *skb;
 330 int unmap_cons;
331
332 unmap_q = rcb->unmap_q;
333 for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
334 skb = unmap_q->unmap_array[unmap_cons].skb;
335 if (!skb)
336 continue;
337 BUG_ON(!(pci_unmap_addr(
338 &unmap_q->unmap_array[unmap_cons], dma_addr)));
339 unmap_q->unmap_array[unmap_cons].skb = NULL;
 340 pci_unmap_single(bnad->pcidev, pci_unmap_addr(&unmap_q->
341 unmap_array[unmap_cons],
342 dma_addr), rcb->rxq->buffer_size,
343 PCI_DMA_FROMDEVICE);
 344 dev_kfree_skb(skb);
 345 }
346 bnad_reset_rcb(bnad, rcb);
347}
348
349static void
350bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
351{
352 u16 to_alloc, alloced, unmap_prod, wi_range;
353 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
354 struct bnad_skb_unmap *unmap_array;
355 struct bna_rxq_entry *rxent;
356 struct sk_buff *skb;
357 dma_addr_t dma_addr;
358
359 alloced = 0;
360 to_alloc =
361 BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);
362
363 unmap_array = unmap_q->unmap_array;
364 unmap_prod = unmap_q->producer_index;
365
366 BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);
367
368 while (to_alloc--) {
369 if (!wi_range) {
370 BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
371 wi_range);
372 }
373 skb = alloc_skb(rcb->rxq->buffer_size + NET_IP_ALIGN,
374 GFP_ATOMIC);
375 if (unlikely(!skb)) {
376 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
377 goto finishing;
378 }
379 skb->dev = bnad->netdev;
380 skb_reserve(skb, NET_IP_ALIGN);
381 unmap_array[unmap_prod].skb = skb;
382 dma_addr = pci_map_single(bnad->pcidev, skb->data,
383 rcb->rxq->buffer_size, PCI_DMA_FROMDEVICE);
384 pci_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
385 dma_addr);
386 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
387 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
388
389 rxent++;
390 wi_range--;
391 alloced++;
392 }
393
394finishing:
395 if (likely(alloced)) {
396 unmap_q->producer_index = unmap_prod;
397 rcb->producer_index = unmap_prod;
398 smp_mb();
399 if (likely(test_bit(BNAD_RXQ_STARTED, &rcb->flags)))
400 bna_rxq_prod_indx_doorbell(rcb);
 401 }
402}
403
404static inline void
405bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
406{
407 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
408
409 if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
410 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
411 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
412 bnad_alloc_n_post_rxbufs(bnad, rcb);
413 smp_mb__before_clear_bit();
414 clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
415 }
416}
417
418static u32
419bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
420{
421 struct bna_cq_entry *cmpl, *next_cmpl;
422 struct bna_rcb *rcb = NULL;
423 unsigned int wi_range, packets = 0, wis = 0;
424 struct bnad_unmap_q *unmap_q;
425 struct sk_buff *skb;
426 u32 flags;
427 u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
428 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
429
430 if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))
431 return 0;
432
433 prefetch(bnad->netdev);
434 BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
435 wi_range);
436 BUG_ON(!(wi_range <= ccb->q_depth));
437 while (cmpl->valid && packets < budget) {
438 packets++;
439 BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
440
441 if (qid0 == cmpl->rxq_id)
442 rcb = ccb->rcb[0];
443 else
444 rcb = ccb->rcb[1];
445
446 unmap_q = rcb->unmap_q;
447
448 skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
449 BUG_ON(!(skb));
450 unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
451 pci_unmap_single(bnad->pcidev,
452 pci_unmap_addr(&unmap_q->
453 unmap_array[unmap_q->
454 consumer_index],
455 dma_addr),
456 rcb->rxq->buffer_size,
457 PCI_DMA_FROMDEVICE);
458 BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
459
460 /* Should be more efficient ? Performance ? */
461 BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
462
463 wis++;
464 if (likely(--wi_range))
465 next_cmpl = cmpl + 1;
466 else {
467 BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
468 wis = 0;
469 BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
470 next_cmpl, wi_range);
471 BUG_ON(!(wi_range <= ccb->q_depth));
472 }
473 prefetch(next_cmpl);
474
475 flags = ntohl(cmpl->flags);
476 if (unlikely
477 (flags &
478 (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
479 BNA_CQ_EF_TOO_LONG))) {
480 dev_kfree_skb_any(skb);
481 rcb->rxq->rx_packets_with_error++;
482 goto next;
483 }
484
485 skb_put(skb, ntohs(cmpl->length));
486 if (likely
487 (bnad->rx_csum &&
488 (((flags & BNA_CQ_EF_IPV4) &&
489 (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
490 (flags & BNA_CQ_EF_IPV6)) &&
491 (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
492 (flags & BNA_CQ_EF_L4_CKSUM_OK)))
493 skb->ip_summed = CHECKSUM_UNNECESSARY;
494 else
 495 skb_checksum_none_assert(skb);
496
497 rcb->rxq->rx_packets++;
498 rcb->rxq->rx_bytes += skb->len;
499 skb->protocol = eth_type_trans(skb, bnad->netdev);
500
501 if (bnad->vlan_grp && (flags & BNA_CQ_EF_VLAN)) {
502 struct bnad_rx_ctrl *rx_ctrl =
503 (struct bnad_rx_ctrl *)ccb->ctrl;
504 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
505 vlan_gro_receive(&rx_ctrl->napi, bnad->vlan_grp,
506 ntohs(cmpl->vlan_tag), skb);
507 else
508 vlan_hwaccel_receive_skb(skb,
509 bnad->vlan_grp,
510 ntohs(cmpl->vlan_tag));
511
512 } else { /* Not VLAN tagged/stripped */
513 struct bnad_rx_ctrl *rx_ctrl =
514 (struct bnad_rx_ctrl *)ccb->ctrl;
515 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
516 napi_gro_receive(&rx_ctrl->napi, skb);
517 else
518 netif_receive_skb(skb);
519 }
520
521next:
522 cmpl->valid = 0;
523 cmpl = next_cmpl;
524 }
525
526 BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
527
528 if (likely(ccb)) {
529 if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
530 bna_ib_ack(ccb->i_dbell, packets);
531 bnad_refill_rxq(bnad, ccb->rcb[0]);
532 if (ccb->rcb[1])
533 bnad_refill_rxq(bnad, ccb->rcb[1]);
534 } else {
535 if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
536 bna_ib_ack(ccb->i_dbell, 0);
537 }
538
539 return packets;
540}
541
542static void
543bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
544{
545 if (unlikely(!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
546 return;
547
548 bna_ib_coalescing_timer_set(ccb->i_dbell, 0);
549 bna_ib_ack(ccb->i_dbell, 0);
550}
551
552static void
553bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
554{
555 unsigned long flags;
556
557 spin_lock_irqsave(&bnad->bna_lock, flags); /* Because of polling context */
 558 bnad_enable_rx_irq_unsafe(ccb);
 559 spin_unlock_irqrestore(&bnad->bna_lock, flags);
560}
561
562static void
563bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
564{
565 struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
566 struct napi_struct *napi = &rx_ctrl->napi;
567
568 if (likely(napi_schedule_prep(napi))) {
 569 bnad_disable_rx_irq(bnad, ccb);
 570 __napi_schedule(napi);
571 }
572 BNAD_UPDATE_CTR(bnad, netif_rx_schedule);
573}
574
575/* MSIX Rx Path Handler */
576static irqreturn_t
577bnad_msix_rx(int irq, void *data)
578{
579 struct bna_ccb *ccb = (struct bna_ccb *)data;
580 struct bnad *bnad = ccb->bnad;
581
582 bnad_netif_rx_schedule_poll(bnad, ccb);
583
584 return IRQ_HANDLED;
585}
586
587/* Interrupt handlers */
588
589/* Mbox Interrupt Handlers */
590static irqreturn_t
591bnad_msix_mbox_handler(int irq, void *data)
592{
593 u32 intr_status;
 594 unsigned long flags;
 595 struct bnad *bnad = (struct bnad *)data;
 596
597 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
598 return IRQ_HANDLED;
 599
600 spin_lock_irqsave(&bnad->bna_lock, flags);
601
602 bna_intr_status_get(&bnad->bna, intr_status);
603
604 if (BNA_IS_MBOX_ERR_INTR(intr_status))
605 bna_mbox_handler(&bnad->bna, intr_status);
606
607 spin_unlock_irqrestore(&bnad->bna_lock, flags);
608
609 return IRQ_HANDLED;
610}
611
612static irqreturn_t
613bnad_isr(int irq, void *data)
614{
615 int i, j;
616 u32 intr_status;
617 unsigned long flags;
 618 struct bnad *bnad = (struct bnad *)data;
619 struct bnad_rx_info *rx_info;
620 struct bnad_rx_ctrl *rx_ctrl;
621
622 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
623 return IRQ_NONE;
624
625 bna_intr_status_get(&bnad->bna, intr_status);
626
627 if (unlikely(!intr_status))
 628 return IRQ_NONE;
629
630 spin_lock_irqsave(&bnad->bna_lock, flags);
 631
 632 if (BNA_IS_MBOX_ERR_INTR(intr_status))
 633 bna_mbox_handler(&bnad->bna, intr_status);
 634
635 spin_unlock_irqrestore(&bnad->bna_lock, flags);
636
637 if (!BNA_IS_INTX_DATA_INTR(intr_status))
638 return IRQ_HANDLED;
639
 640 /* Process data interrupts */
641 /* Tx processing */
642 for (i = 0; i < bnad->num_tx; i++) {
643 for (j = 0; j < bnad->num_txq_per_tx; j++)
644 bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
645 }
646 /* Rx processing */
647 for (i = 0; i < bnad->num_rx; i++) {
648 rx_info = &bnad->rx_info[i];
649 if (!rx_info->rx)
650 continue;
651 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
652 rx_ctrl = &rx_info->rx_ctrl[j];
653 if (rx_ctrl->ccb)
654 bnad_netif_rx_schedule_poll(bnad,
655 rx_ctrl->ccb);
656 }
657 }
658 return IRQ_HANDLED;
659}
660
661/*
662 * Called in interrupt / callback context
663 * with bna_lock held, so cfg_flags access is OK
664 */
665static void
666bnad_enable_mbox_irq(struct bnad *bnad)
667{
 668 clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
 669
670 BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
671}
672
673/*
 674 * Called with bnad->bna_lock held because of
 675 * bnad->cfg_flags access.
676 */
 677static void
678bnad_disable_mbox_irq(struct bnad *bnad)
679{
 680 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
 681
682 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
683}
 684
685static void
686bnad_set_netdev_perm_addr(struct bnad *bnad)
687{
688 struct net_device *netdev = bnad->netdev;
 689
690 memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
691 if (is_zero_ether_addr(netdev->dev_addr))
692 memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
693}
694
695/* Control Path Handlers */
696
697/* Callbacks */
698void
699bnad_cb_device_enable_mbox_intr(struct bnad *bnad)
700{
701 bnad_enable_mbox_irq(bnad);
702}
703
704void
705bnad_cb_device_disable_mbox_intr(struct bnad *bnad)
706{
707 bnad_disable_mbox_irq(bnad);
708}
709
710void
711bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status)
712{
713 complete(&bnad->bnad_completions.ioc_comp);
714 bnad->bnad_completions.ioc_comp_status = status;
715}
716
717void
718bnad_cb_device_disabled(struct bnad *bnad, enum bna_cb_status status)
719{
720 complete(&bnad->bnad_completions.ioc_comp);
721 bnad->bnad_completions.ioc_comp_status = status;
722}
723
724static void
725bnad_cb_port_disabled(void *arg, enum bna_cb_status status)
726{
727 struct bnad *bnad = (struct bnad *)arg;
728
729 complete(&bnad->bnad_completions.port_comp);
730
731 netif_carrier_off(bnad->netdev);
732}
733
734void
735bnad_cb_port_link_status(struct bnad *bnad,
736 enum bna_link_status link_status)
737{
738 bool link_up = 0;
739
740 link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
741
742 if (link_status == BNA_CEE_UP) {
743 set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
744 BNAD_UPDATE_CTR(bnad, cee_up);
745 } else
746 clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
747
748 if (link_up) {
749 if (!netif_carrier_ok(bnad->netdev)) {
750 struct bna_tcb *tcb = bnad->tx_info[0].tcb[0];
751 if (!tcb)
752 return;
753 pr_warn("bna: %s link up\n",
754 bnad->netdev->name);
755 netif_carrier_on(bnad->netdev);
756 BNAD_UPDATE_CTR(bnad, link_toggle);
 757 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
758 /* Force an immediate Transmit Schedule */
759 pr_info("bna: %s TX_STARTED\n",
760 bnad->netdev->name);
761 netif_wake_queue(bnad->netdev);
762 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
763 } else {
764 netif_stop_queue(bnad->netdev);
765 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
766 }
767 }
768 } else {
769 if (netif_carrier_ok(bnad->netdev)) {
770 pr_warn("bna: %s link down\n",
771 bnad->netdev->name);
772 netif_carrier_off(bnad->netdev);
773 BNAD_UPDATE_CTR(bnad, link_toggle);
774 }
775 }
776}
777
778static void
779bnad_cb_tx_disabled(void *arg, struct bna_tx *tx,
780 enum bna_cb_status status)
781{
782 struct bnad *bnad = (struct bnad *)arg;
783
784 complete(&bnad->bnad_completions.tx_comp);
785}
786
787static void
788bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
789{
790 struct bnad_tx_info *tx_info =
791 (struct bnad_tx_info *)tcb->txq->tx->priv;
792 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
793
794 tx_info->tcb[tcb->id] = tcb;
795 unmap_q->producer_index = 0;
796 unmap_q->consumer_index = 0;
797 unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
798}
799
800static void
801bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
802{
803 struct bnad_tx_info *tx_info =
804 (struct bnad_tx_info *)tcb->txq->tx->priv;
805 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
806
807 while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
808 cpu_relax();
809
810 bnad_free_all_txbufs(bnad, tcb);
811
812 unmap_q->producer_index = 0;
813 unmap_q->consumer_index = 0;
814
815 smp_mb__before_clear_bit();
816 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
817
818 tx_info->tcb[tcb->id] = NULL;
819}
820
821static void
822bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
823{
824 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
825
826 unmap_q->producer_index = 0;
827 unmap_q->consumer_index = 0;
828 unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
829}
830
831static void
832bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
833{
834 bnad_free_all_rxbufs(bnad, rcb);
835}
836
837static void
838bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
839{
840 struct bnad_rx_info *rx_info =
841 (struct bnad_rx_info *)ccb->cq->rx->priv;
842
843 rx_info->rx_ctrl[ccb->id].ccb = ccb;
844 ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
845}
846
847static void
848bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
849{
850 struct bnad_rx_info *rx_info =
851 (struct bnad_rx_info *)ccb->cq->rx->priv;
852
853 rx_info->rx_ctrl[ccb->id].ccb = NULL;
854}
855
856static void
857bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb)
858{
859 struct bnad_tx_info *tx_info =
860 (struct bnad_tx_info *)tcb->txq->tx->priv;
861
862 if (tx_info != &bnad->tx_info[0])
863 return;
864
 865 clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
866 netif_stop_queue(bnad->netdev);
867 pr_info("bna: %s TX_STOPPED\n", bnad->netdev->name);
868}
869
870static void
871bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb)
872{
 873 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
 874
 875 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
876 return;
877
 878 clear_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags);
 879
880 while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
881 cpu_relax();
882
883 bnad_free_all_txbufs(bnad, tcb);
884
885 unmap_q->producer_index = 0;
886 unmap_q->consumer_index = 0;
887
888 smp_mb__before_clear_bit();
889 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
890
891 /*
 892 * Workaround: if the first device enable failed and we
 893 * ended up with a zero MAC address, try to fetch the MAC
 894 * address again here.
895 */
896 if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
897 bna_port_mac_get(&bnad->bna.port, &bnad->perm_addr);
898 bnad_set_netdev_perm_addr(bnad);
899 }
900
901 set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
902
903 if (netif_carrier_ok(bnad->netdev)) {
904 pr_info("bna: %s TX_STARTED\n", bnad->netdev->name);
905 netif_wake_queue(bnad->netdev);
906 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
907 }
908}
909
910static void
911bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
912{
913 /* Delay only once for the whole Tx Path Shutdown */
914 if (!test_and_set_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags))
915 mdelay(BNAD_TXRX_SYNC_MDELAY);
916}
917
918static void
919bnad_cb_rx_cleanup(struct bnad *bnad,
920 struct bna_ccb *ccb)
921{
922 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
923
 924 if (ccb->rcb[1])
 925 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
926
927 if (!test_and_set_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags))
928 mdelay(BNAD_TXRX_SYNC_MDELAY);
929}
930
931static void
932bnad_cb_rx_post(struct bnad *bnad, struct bna_rcb *rcb)
933{
934 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
935
936 clear_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags);
937
938 if (rcb == rcb->cq->ccb->rcb[0])
939 bnad_cq_cmpl_init(bnad, rcb->cq->ccb);
940
941 bnad_free_all_rxbufs(bnad, rcb);
942
943 set_bit(BNAD_RXQ_STARTED, &rcb->flags);
944
945 /* Now allocate & post buffers for this RCB */
946 /* !!Allocation in callback context */
947 if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
948 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
949 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
950 bnad_alloc_n_post_rxbufs(bnad, rcb);
951 smp_mb__before_clear_bit();
952 clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
953 }
954}
955
956static void
957bnad_cb_rx_disabled(void *arg, struct bna_rx *rx,
958 enum bna_cb_status status)
959{
960 struct bnad *bnad = (struct bnad *)arg;
961
962 complete(&bnad->bnad_completions.rx_comp);
963}
964
965static void
966bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx,
967 enum bna_cb_status status)
968{
969 bnad->bnad_completions.mcast_comp_status = status;
970 complete(&bnad->bnad_completions.mcast_comp);
971}
972
973void
974bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
975 struct bna_stats *stats)
976{
977 if (status == BNA_CB_SUCCESS)
978 BNAD_UPDATE_CTR(bnad, hw_stats_updates);
979
980 if (!netif_running(bnad->netdev) ||
981 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
982 return;
983
984 mod_timer(&bnad->stats_timer,
985 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
986}
987
988/* Resource allocation, free functions */
989
990static void
991bnad_mem_free(struct bnad *bnad,
992 struct bna_mem_info *mem_info)
993{
994 int i;
995 dma_addr_t dma_pa;
996
997 if (mem_info->mdl == NULL)
998 return;
999
1000 for (i = 0; i < mem_info->num; i++) {
1001 if (mem_info->mdl[i].kva != NULL) {
1002 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1003 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
1004 dma_pa);
1005 pci_free_consistent(bnad->pcidev,
1006 mem_info->mdl[i].len,
1007 mem_info->mdl[i].kva, dma_pa);
1008 } else
1009 kfree(mem_info->mdl[i].kva);
1010 }
1011 }
1012 kfree(mem_info->mdl);
1013 mem_info->mdl = NULL;
1014}
1015
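/*
 * Allocate mem_info->num descriptors of mem_info->len bytes each,
 * as DMA-coherent or plain kernel memory depending on mem_type.
 */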
1016static int
1017bnad_mem_alloc(struct bnad *bnad,
1018 struct bna_mem_info *mem_info)
1019{
1020 int i;
1021 dma_addr_t dma_pa;
1022
1023 if ((mem_info->num == 0) || (mem_info->len == 0)) {
1024 mem_info->mdl = NULL;
1025 return 0;
1026 }
1027
1028 mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1029 GFP_KERNEL);
1030 if (mem_info->mdl == NULL)
1031 return -ENOMEM;
1032
1033 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1034 for (i = 0; i < mem_info->num; i++) {
1035 mem_info->mdl[i].len = mem_info->len;
1036 mem_info->mdl[i].kva =
1037 pci_alloc_consistent(bnad->pcidev,
1038 mem_info->len, &dma_pa);
1039
1040 if (mem_info->mdl[i].kva == NULL)
1041 goto err_return;
1042
1043 BNA_SET_DMA_ADDR(dma_pa,
1044 &(mem_info->mdl[i].dma));
1045 }
1046 } else {
1047 for (i = 0; i < mem_info->num; i++) {
1048 mem_info->mdl[i].len = mem_info->len;
1049 mem_info->mdl[i].kva = kzalloc(mem_info->len,
1050 GFP_KERNEL);
1051 if (mem_info->mdl[i].kva == NULL)
1052 goto err_return;
1053 }
1054 }
1055
1056 return 0;
1057
1058err_return:
1059 bnad_mem_free(bnad, mem_info);
1060 return -ENOMEM;
1061}
1062
1063/* Free IRQ for Mailbox */
1064static void
1065bnad_mbox_irq_free(struct bnad *bnad,
1066 struct bna_intr_info *intr_info)
1067{
1068 int irq;
1069 unsigned long flags;
1070
1071 if (intr_info->idl == NULL)
1072 return;
1073
1074 spin_lock_irqsave(&bnad->bna_lock, flags);
 1075 bnad_disable_mbox_irq(bnad);
 1076 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1077
1078 irq = BNAD_GET_MBOX_IRQ(bnad);
 1079 free_irq(irq, bnad);
 1080
1081 kfree(intr_info->idl);
1082}
1083
1084/*
 1085 * Allocates the IRQ for the Mailbox, but keeps it disabled.
 1086 * It will be enabled once we get the mbox enable callback
 1087 * from bna.
1088 */
1089static int
1090bnad_mbox_irq_alloc(struct bnad *bnad,
1091 struct bna_intr_info *intr_info)
1092{
 1093 int err = 0;
 1094 unsigned long irq_flags, flags;
1095 u32 irq;
1096 irq_handler_t irq_handler;
1097
1098 /* Mbox should use only 1 vector */
1099
1100 intr_info->idl = kzalloc(sizeof(*(intr_info->idl)), GFP_KERNEL);
1101 if (!intr_info->idl)
1102 return -ENOMEM;
1103
1104 spin_lock_irqsave(&bnad->bna_lock, flags);
1105 if (bnad->cfg_flags & BNAD_CF_MSIX) {
1106 irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1107 irq = bnad->msix_table[bnad->msix_num - 1].vector;
 1108 irq_flags = 0;
1109 intr_info->intr_type = BNA_INTR_T_MSIX;
1110 intr_info->idl[0].vector = bnad->msix_num - 1;
1111 } else {
1112 irq_handler = (irq_handler_t)bnad_isr;
1113 irq = bnad->pcidev->irq;
 1114 irq_flags = IRQF_SHARED;
1115 intr_info->intr_type = BNA_INTR_T_INTX;
1116 /* intr_info->idl.vector = 0 ? */
1117 }
1118 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1119
1120 sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1121
1122 /*
1123 * Set the Mbox IRQ disable flag, so that the IRQ handler
 1124 * called from request_irq() for SHARED IRQs does not execute
1125 */
1126 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1127
1128 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1129
 1130 err = request_irq(irq, irq_handler, irq_flags,
 1131 bnad->mbox_irq_name, bnad);
 1132
1133 if (err) {
1134 kfree(intr_info->idl);
1135 intr_info->idl = NULL;
1136 }
1137
 1138 return err;
1139}
1140
1141static void
1142bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1143{
1144 kfree(intr_info->idl);
1145 intr_info->idl = NULL;
1146}
1147
1148/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1149static int
1150bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1151 uint txrx_id, struct bna_intr_info *intr_info)
1152{
1153 int i, vector_start = 0;
1154 u32 cfg_flags;
1155 unsigned long flags;
1156
1157 spin_lock_irqsave(&bnad->bna_lock, flags);
1158 cfg_flags = bnad->cfg_flags;
1159 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1160
1161 if (cfg_flags & BNAD_CF_MSIX) {
1162 intr_info->intr_type = BNA_INTR_T_MSIX;
1163 intr_info->idl = kcalloc(intr_info->num,
1164 sizeof(struct bna_intr_descr),
1165 GFP_KERNEL);
1166 if (!intr_info->idl)
1167 return -ENOMEM;
1168
1169 switch (src) {
1170 case BNAD_INTR_TX:
1171 vector_start = txrx_id;
1172 break;
1173
1174 case BNAD_INTR_RX:
1175 vector_start = bnad->num_tx * bnad->num_txq_per_tx +
1176 txrx_id;
1177 break;
1178
1179 default:
1180 BUG();
1181 }
1182
1183 for (i = 0; i < intr_info->num; i++)
1184 intr_info->idl[i].vector = vector_start + i;
1185 } else {
1186 intr_info->intr_type = BNA_INTR_T_INTX;
1187 intr_info->num = 1;
1188 intr_info->idl = kcalloc(intr_info->num,
1189 sizeof(struct bna_intr_descr),
1190 GFP_KERNEL);
1191 if (!intr_info->idl)
1192 return -ENOMEM;
1193
1194 switch (src) {
1195 case BNAD_INTR_TX:
1196 intr_info->idl[0].vector = 0x1; /* Bit mask : Tx IB */
1197 break;
1198
1199 case BNAD_INTR_RX:
1200 intr_info->idl[0].vector = 0x2; /* Bit mask : Rx IB */
1201 break;
1202 }
1203 }
1204 return 0;
1205}
1206
1207/**
1208 * NOTE: Should be called for MSIX only
1209 * Unregisters Tx MSIX vector(s) from the kernel
1210 */
1211static void
1212bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1213 int num_txqs)
1214{
1215 int i;
1216 int vector_num;
1217
1218 for (i = 0; i < num_txqs; i++) {
1219 if (tx_info->tcb[i] == NULL)
1220 continue;
1221
1222 vector_num = tx_info->tcb[i]->intr_vector;
1223 free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1224 }
1225}
1226
1227/**
1228 * NOTE: Should be called for MSIX only
1229 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1230 */
1231static int
1232bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1233 uint tx_id, int num_txqs)
1234{
1235 int i;
1236 int err;
1237 int vector_num;
1238
1239 for (i = 0; i < num_txqs; i++) {
1240 vector_num = tx_info->tcb[i]->intr_vector;
1241 sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1242 tx_id + tx_info->tcb[i]->id);
1243 err = request_irq(bnad->msix_table[vector_num].vector,
1244 (irq_handler_t)bnad_msix_tx, 0,
1245 tx_info->tcb[i]->name,
1246 tx_info->tcb[i]);
1247 if (err)
1248 goto err_return;
1249 }
1250
1251 return 0;
1252
1253err_return:
1254 if (i > 0)
1255 bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
1256 return -1;
1257}
1258
1259/**
1260 * NOTE: Should be called for MSIX only
1261 * Unregisters Rx MSIX vector(s) from the kernel
1262 */
1263static void
1264bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1265 int num_rxps)
1266{
1267 int i;
1268 int vector_num;
1269
1270 for (i = 0; i < num_rxps; i++) {
1271 if (rx_info->rx_ctrl[i].ccb == NULL)
1272 continue;
1273
1274 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1275 free_irq(bnad->msix_table[vector_num].vector,
1276 rx_info->rx_ctrl[i].ccb);
1277 }
1278}
1279
1280/**
1281 * NOTE: Should be called for MSIX only
 1283 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
1283 */
1284static int
1285bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1286 uint rx_id, int num_rxps)
1287{
1288 int i;
1289 int err;
1290 int vector_num;
1291
1292 for (i = 0; i < num_rxps; i++) {
1293 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1294 sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1295 bnad->netdev->name,
1296 rx_id + rx_info->rx_ctrl[i].ccb->id);
1297 err = request_irq(bnad->msix_table[vector_num].vector,
1298 (irq_handler_t)bnad_msix_rx, 0,
1299 rx_info->rx_ctrl[i].ccb->name,
1300 rx_info->rx_ctrl[i].ccb);
1301 if (err)
1302 goto err_return;
1303 }
1304
1305 return 0;
1306
1307err_return:
1308 if (i > 0)
1309 bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
1310 return -1;
1311}
1312
1313/* Free Tx object Resources */
1314static void
1315bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1316{
1317 int i;
1318
1319 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1320 if (res_info[i].res_type == BNA_RES_T_MEM)
1321 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1322 else if (res_info[i].res_type == BNA_RES_T_INTR)
1323 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1324 }
1325}
1326
1327/* Allocates memory and interrupt resources for Tx object */
1328static int
1329bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1330 uint tx_id)
1331{
1332 int i, err = 0;
1333
1334 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1335 if (res_info[i].res_type == BNA_RES_T_MEM)
1336 err = bnad_mem_alloc(bnad,
1337 &res_info[i].res_u.mem_info);
1338 else if (res_info[i].res_type == BNA_RES_T_INTR)
1339 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1340 &res_info[i].res_u.intr_info);
1341 if (err)
1342 goto err_return;
1343 }
1344 return 0;
1345
1346err_return:
1347 bnad_tx_res_free(bnad, res_info);
1348 return err;
1349}
1350
1351/* Free Rx object Resources */
1352static void
1353bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1354{
1355 int i;
1356
1357 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1358 if (res_info[i].res_type == BNA_RES_T_MEM)
1359 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1360 else if (res_info[i].res_type == BNA_RES_T_INTR)
1361 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1362 }
1363}
1364
1365/* Allocates memory and interrupt resources for Rx object */
1366static int
1367bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1368 uint rx_id)
1369{
1370 int i, err = 0;
1371
1372 /* All memory needs to be allocated before setup_ccbs */
1373 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1374 if (res_info[i].res_type == BNA_RES_T_MEM)
1375 err = bnad_mem_alloc(bnad,
1376 &res_info[i].res_u.mem_info);
1377 else if (res_info[i].res_type == BNA_RES_T_INTR)
1378 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1379 &res_info[i].res_u.intr_info);
1380 if (err)
1381 goto err_return;
1382 }
1383 return 0;
1384
1385err_return:
1386 bnad_rx_res_free(bnad, res_info);
1387 return err;
1388}
1389
1390/* Timer callbacks */
1391/* a) IOC timer */
1392static void
1393bnad_ioc_timeout(unsigned long data)
1394{
1395 struct bnad *bnad = (struct bnad *)data;
1396 unsigned long flags;
1397
1398 spin_lock_irqsave(&bnad->bna_lock, flags);
 1399 bfa_nw_ioc_timeout((void *) &bnad->bna.device.ioc);
1400 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1401}
1402
1403static void
1404bnad_ioc_hb_check(unsigned long data)
1405{
1406 struct bnad *bnad = (struct bnad *)data;
1407 unsigned long flags;
1408
1409 spin_lock_irqsave(&bnad->bna_lock, flags);
 1410 bfa_nw_ioc_hb_check((void *) &bnad->bna.device.ioc);
1411 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1412}
1413
1414static void
1415bnad_ioc_sem_timeout(unsigned long data)
1416{
1417 struct bnad *bnad = (struct bnad *)data;
1418 unsigned long flags;
1419
1420 spin_lock_irqsave(&bnad->bna_lock, flags);
 1421 bfa_nw_ioc_sem_timeout((void *) &bnad->bna.device.ioc);
1422 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1423}
1424
1425/*
1426 * All timer routines use bnad->bna_lock to protect against
1427 * the following race, which may occur in case of no locking:
1428 * Time CPU m CPU n
1429 * 0 1 = test_bit
1430 * 1 clear_bit
1431 * 2 del_timer_sync
1432 * 3 mod_timer
1433 */
1434
1435/* b) Dynamic Interrupt Moderation Timer */
1436static void
1437bnad_dim_timeout(unsigned long data)
1438{
1439 struct bnad *bnad = (struct bnad *)data;
1440 struct bnad_rx_info *rx_info;
1441 struct bnad_rx_ctrl *rx_ctrl;
1442 int i, j;
1443 unsigned long flags;
1444
1445 if (!netif_carrier_ok(bnad->netdev))
1446 return;
1447
1448 spin_lock_irqsave(&bnad->bna_lock, flags);
1449 for (i = 0; i < bnad->num_rx; i++) {
1450 rx_info = &bnad->rx_info[i];
1451 if (!rx_info->rx)
1452 continue;
1453 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1454 rx_ctrl = &rx_info->rx_ctrl[j];
1455 if (!rx_ctrl->ccb)
1456 continue;
1457 bna_rx_dim_update(rx_ctrl->ccb);
1458 }
1459 }
1460
 1461 /* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
1462 if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1463 mod_timer(&bnad->dim_timer,
1464 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1465 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1466}
1467
1468/* c) Statistics Timer */
1469static void
1470bnad_stats_timeout(unsigned long data)
1471{
1472 struct bnad *bnad = (struct bnad *)data;
1473 unsigned long flags;
1474
1475 if (!netif_running(bnad->netdev) ||
1476 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1477 return;
1478
1479 spin_lock_irqsave(&bnad->bna_lock, flags);
1480 bna_stats_get(&bnad->bna);
1481 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1482}
1483
1484/*
1485 * Set up timer for DIM
1486 * Called with bnad->bna_lock held
1487 */
1488void
1489bnad_dim_timer_start(struct bnad *bnad)
1490{
1491 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1492 !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1493 setup_timer(&bnad->dim_timer, bnad_dim_timeout,
1494 (unsigned long)bnad);
1495 set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1496 mod_timer(&bnad->dim_timer,
1497 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1498 }
1499}
1500
1501/*
1502 * Set up timer for statistics
1503 * Called with mutex_lock(&bnad->conf_mutex) held
1504 */
1505static void
1506bnad_stats_timer_start(struct bnad *bnad)
1507{
1508 unsigned long flags;
1509
1510 spin_lock_irqsave(&bnad->bna_lock, flags);
1511 if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1512 setup_timer(&bnad->stats_timer, bnad_stats_timeout,
1513 (unsigned long)bnad);
1514 mod_timer(&bnad->stats_timer,
1515 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1516 }
1517 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1518}
1519
1520/*
1521 * Stops the stats timer
1522 * Called with mutex_lock(&bnad->conf_mutex) held
1523 */
1524static void
1525bnad_stats_timer_stop(struct bnad *bnad)
1526{
1527 int to_del = 0;
1528 unsigned long flags;
1529
1530 spin_lock_irqsave(&bnad->bna_lock, flags);
1531 if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1532 to_del = 1;
1533 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1534 if (to_del)
1535 del_timer_sync(&bnad->stats_timer);
1536}
1537
1538/* Utilities */
1539
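/*
 * Copy the netdev multicast list into mc_list, starting at index 1;
 * index 0 is reserved for the broadcast address.
 */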
1540static void
1541bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1542{
1543 int i = 1; /* Index 0 has broadcast address */
1544 struct netdev_hw_addr *mc_addr;
1545
1546 netdev_for_each_mc_addr(mc_addr, netdev) {
1547 memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
1548 ETH_ALEN);
1549 i++;
1550 }
1551}
1552
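/*
 * NAPI poll handler: process up to 'budget' Rx completions; when fewer
 * are consumed (or the link is down), complete NAPI and re-enable the
 * Rx interrupt.
 */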
1553static int
1554bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1555{
1556 struct bnad_rx_ctrl *rx_ctrl =
1557 container_of(napi, struct bnad_rx_ctrl, napi);
1558 struct bna_ccb *ccb;
1559 struct bnad *bnad;
1560 int rcvd = 0;
1561
1562 ccb = rx_ctrl->ccb;
1563
1564 bnad = ccb->bnad;
1565
1566 if (!netif_carrier_ok(bnad->netdev))
1567 goto poll_exit;
1568
1569 rcvd = bnad_poll_cq(bnad, ccb, budget);
1570 if (rcvd == budget)
1571 return rcvd;
1572
1573poll_exit:
 1574 napi_complete(napi);
1575
1576 BNAD_UPDATE_CTR(bnad, netif_rx_complete);
1577
1578 bnad_enable_rx_irq(bnad, ccb);
1579 return rcvd;
1580}
1581
1582static void
1583bnad_napi_enable(struct bnad *bnad, u32 rx_id)
1584{
1585 struct bnad_rx_ctrl *rx_ctrl;
1586 int i;
1587
1588 /* Initialize & enable NAPI */
1589 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1590 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
 1591
 1592 netif_napi_add(bnad->netdev, &rx_ctrl->napi,
 1593 bnad_napi_poll_rx, 64);
 1594
1595 napi_enable(&rx_ctrl->napi);
1596 }
1597}
1598
1599static void
1600bnad_napi_disable(struct bnad *bnad, u32 rx_id)
1601{
1602 int i;
1603
1604 /* First disable and then clean up */
1605 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1606 napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1607 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1608 }
1609}
1610
 1611/* Should be called with conf_lock held */
1612void
1613bnad_cleanup_tx(struct bnad *bnad, uint tx_id)
1614{
1615 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1616 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1617 unsigned long flags;
1618
1619 if (!tx_info->tx)
1620 return;
1621
1622 init_completion(&bnad->bnad_completions.tx_comp);
1623 spin_lock_irqsave(&bnad->bna_lock, flags);
1624 bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1625 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1626 wait_for_completion(&bnad->bnad_completions.tx_comp);
1627
1628 if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1629 bnad_tx_msix_unregister(bnad, tx_info,
1630 bnad->num_txq_per_tx);
1631
1632 spin_lock_irqsave(&bnad->bna_lock, flags);
1633 bna_tx_destroy(tx_info->tx);
1634 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1635
1636 tx_info->tx = NULL;
1637
1638 if (0 == tx_id)
1639 tasklet_kill(&bnad->tx_free_tasklet);
1640
1641 bnad_tx_res_free(bnad, res_info);
1642}
1643
 1644/* Should be called with conf_lock held */
1645int
1646bnad_setup_tx(struct bnad *bnad, uint tx_id)
1647{
1648 int err;
1649 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1650 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1651 struct bna_intr_info *intr_info =
1652 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1653 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1654 struct bna_tx_event_cbfn tx_cbfn;
1655 struct bna_tx *tx;
1656 unsigned long flags;
1657
1658 /* Initialize the Tx object configuration */
1659 tx_config->num_txq = bnad->num_txq_per_tx;
1660 tx_config->txq_depth = bnad->txq_depth;
1661 tx_config->tx_type = BNA_TX_T_REGULAR;
1662
1663 /* Initialize the tx event handlers */
1664 tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
1665 tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
1666 tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
1667 tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
1668 tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
1669
1670 /* Get BNA's resource requirement for one tx object */
1671 spin_lock_irqsave(&bnad->bna_lock, flags);
1672 bna_tx_res_req(bnad->num_txq_per_tx,
1673 bnad->txq_depth, res_info);
1674 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1675
1676 /* Fill Unmap Q memory requirements */
1677 BNAD_FILL_UNMAPQ_MEM_REQ(
1678 &res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1679 bnad->num_txq_per_tx,
1680 BNAD_TX_UNMAPQ_DEPTH);
1681
1682 /* Allocate resources */
1683 err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1684 if (err)
1685 return err;
1686
1687 /* Ask BNA to create one Tx object, supplying required resources */
1688 spin_lock_irqsave(&bnad->bna_lock, flags);
1689 tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1690 tx_info);
1691 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1692 if (!tx)
1693 goto err_return;
1694 tx_info->tx = tx;
1695
1696 /* Register ISR for the Tx object */
1697 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1698 err = bnad_tx_msix_register(bnad, tx_info,
1699 tx_id, bnad->num_txq_per_tx);
1700 if (err)
1701 goto err_return;
1702 }
1703
1704 spin_lock_irqsave(&bnad->bna_lock, flags);
1705 bna_tx_enable(tx);
1706 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1707
1708 return 0;
1709
1710err_return:
1711 bnad_tx_res_free(bnad, res_info);
1712 return err;
1713}
1714
1715/* Setup the rx config for bna_rx_create */
1716/* bnad decides the configuration */
1717static void
1718bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
1719{
1720 rx_config->rx_type = BNA_RX_T_REGULAR;
1721 rx_config->num_paths = bnad->num_rxp_per_rx;
1722
1723 if (bnad->num_rxp_per_rx > 1) {
1724 rx_config->rss_status = BNA_STATUS_T_ENABLED;
1725 rx_config->rss_config.hash_type =
1726 (BFI_RSS_T_V4_TCP |
1727 BFI_RSS_T_V6_TCP |
1728 BFI_RSS_T_V4_IP |
1729 BFI_RSS_T_V6_IP);
1730 rx_config->rss_config.hash_mask =
1731 bnad->num_rxp_per_rx - 1;
1732 get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
1733 sizeof(rx_config->rss_config.toeplitz_hash_key));
1734 } else {
1735 rx_config->rss_status = BNA_STATUS_T_DISABLED;
1736 memset(&rx_config->rss_config, 0,
1737 sizeof(rx_config->rss_config));
1738 }
1739 rx_config->rxp_type = BNA_RXP_SLR;
1740 rx_config->q_depth = bnad->rxq_depth;
1741
1742 rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;
1743
1744 rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
1745}
1746
1747/* Called with mutex_lock(&bnad->conf_mutex) held */
1748void
1749bnad_cleanup_rx(struct bnad *bnad, uint rx_id)
1750{
1751 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1752 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1753 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1754 unsigned long flags;
1755 int dim_timer_del = 0;
1756
1757 if (!rx_info->rx)
1758 return;
1759
1760 if (0 == rx_id) {
1761 spin_lock_irqsave(&bnad->bna_lock, flags);
1762 dim_timer_del = bnad_dim_timer_running(bnad);
1763 if (dim_timer_del)
1764 clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1765 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1766 if (dim_timer_del)
1767 del_timer_sync(&bnad->dim_timer);
1768 }
1769
1770 bnad_napi_disable(bnad, rx_id);
1771
1772 init_completion(&bnad->bnad_completions.rx_comp);
1773 spin_lock_irqsave(&bnad->bna_lock, flags);
1774 bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
1775 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1776 wait_for_completion(&bnad->bnad_completions.rx_comp);
1777
1778 if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
1779 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
1780
1781 spin_lock_irqsave(&bnad->bna_lock, flags);
1782 bna_rx_destroy(rx_info->rx);
1783 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1784
1785 rx_info->rx = NULL;
1786
1787 bnad_rx_res_free(bnad, res_info);
1788}
1789
1790/* Called with mutex_lock(&bnad->conf_mutex) held */
1791int
1792bnad_setup_rx(struct bnad *bnad, uint rx_id)
1793{
1794 int err;
1795 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1796 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1797 struct bna_intr_info *intr_info =
1798 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
1799 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1800 struct bna_rx_event_cbfn rx_cbfn;
1801 struct bna_rx *rx;
1802 unsigned long flags;
1803
1804 /* Initialize the Rx object configuration */
1805 bnad_init_rx_config(bnad, rx_config);
1806
1807 /* Initialize the Rx event handlers */
1808 rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
 1809 rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
1810 rx_cbfn.rcb_destroy_cbfn = NULL;
1811 rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
1812 rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
1813 rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
1814 rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
1815
1816 /* Get BNA's resource requirement for one Rx object */
1817 spin_lock_irqsave(&bnad->bna_lock, flags);
1818 bna_rx_res_req(rx_config, res_info);
1819 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1820
1821 /* Fill Unmap Q memory requirements */
1822 BNAD_FILL_UNMAPQ_MEM_REQ(
1823 &res_info[BNA_RX_RES_MEM_T_UNMAPQ],
1824 rx_config->num_paths +
1825 ((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 :
1826 rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH);
1827
1828 /* Allocate resource */
1829 err = bnad_rx_res_alloc(bnad, res_info, rx_id);
1830 if (err)
1831 return err;
1832
1833 /* Ask BNA to create one Rx object, supplying required resources */
1834 spin_lock_irqsave(&bnad->bna_lock, flags);
1835 rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
1836 rx_info);
1837 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1838 if (!rx)
1839 goto err_return;
1840 rx_info->rx = rx;
1841
1842 /* Register ISR for the Rx object */
1843 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1844 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
1845 rx_config->num_paths);
1846 if (err)
1847 goto err_return;
1848 }
1849
1850 /* Enable NAPI */
1851 bnad_napi_enable(bnad, rx_id);
1852
1853 spin_lock_irqsave(&bnad->bna_lock, flags);
1854 if (0 == rx_id) {
1855 /* Set up Dynamic Interrupt Moderation Vector */
1856 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
1857 bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
1858
1859 /* Enable VLAN filtering only on the default Rx */
1860 bna_rx_vlanfilter_enable(rx);
1861
1862 /* Start the DIM timer */
1863 bnad_dim_timer_start(bnad);
1864 }
1865
1866 bna_rx_enable(rx);
1867 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1868
1869 return 0;
1870
1871err_return:
1872 bnad_cleanup_rx(bnad, rx_id);
1873 return err;
1874}
1875
1876/* Called with conf_lock & bnad->bna_lock held */
1877void
1878bnad_tx_coalescing_timeo_set(struct bnad *bnad)
1879{
1880 struct bnad_tx_info *tx_info;
1881
1882 tx_info = &bnad->tx_info[0];
1883 if (!tx_info->tx)
1884 return;
1885
1886 bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
1887}
1888
1889/* Called with conf_lock & bnad->bna_lock held */
1890void
1891bnad_rx_coalescing_timeo_set(struct bnad *bnad)
1892{
1893 struct bnad_rx_info *rx_info;
1894 int i;
1895
1896 for (i = 0; i < bnad->num_rx; i++) {
1897 rx_info = &bnad->rx_info[i];
1898 if (!rx_info->rx)
1899 continue;
1900 bna_rx_coalescing_timeo_set(rx_info->rx,
1901 bnad->rx_coalescing_timeo);
1902 }
1903}
1904
1905/*
1906 * Called with bnad->bna_lock held
1907 */
1908static int
1909bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
1910{
1911 int ret;
1912
1913 if (!is_valid_ether_addr(mac_addr))
1914 return -EADDRNOTAVAIL;
1915
1916 /* If datapath is down, pretend everything went through */
1917 if (!bnad->rx_info[0].rx)
1918 return 0;
1919
1920 ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
1921 if (ret != BNA_CB_SUCCESS)
1922 return -EADDRNOTAVAIL;
1923
1924 return 0;
1925}
1926
1927/* Should be called with conf_lock held */
1928static int
1929bnad_enable_default_bcast(struct bnad *bnad)
1930{
1931 struct bnad_rx_info *rx_info = &bnad->rx_info[0];
1932 int ret;
1933 unsigned long flags;
1934
1935 init_completion(&bnad->bnad_completions.mcast_comp);
1936
1937 spin_lock_irqsave(&bnad->bna_lock, flags);
1938 ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
1939 bnad_cb_rx_mcast_add);
1940 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1941
1942 if (ret == BNA_CB_SUCCESS)
1943 wait_for_completion(&bnad->bnad_completions.mcast_comp);
1944 else
1945 return -ENODEV;
1946
1947 if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
1948 return -ENODEV;
1949
1950 return 0;
1951}
1952
1953/* Statistics utilities */
1954void
 1955bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
 1956{
1957 int i, j;
1958
1959 for (i = 0; i < bnad->num_rx; i++) {
1960 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1961 if (bnad->rx_info[i].rx_ctrl[j].ccb) {
 1962 stats->rx_packets += bnad->rx_info[i].
 1963 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
 1964 stats->rx_bytes += bnad->rx_info[i].
1965 rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
1966 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
1967 bnad->rx_info[i].rx_ctrl[j].ccb->
1968 rcb[1]->rxq) {
 1969 stats->rx_packets +=
1970 bnad->rx_info[i].rx_ctrl[j].
1971 ccb->rcb[1]->rxq->rx_packets;
 1972 stats->rx_bytes +=
1973 bnad->rx_info[i].rx_ctrl[j].
1974 ccb->rcb[1]->rxq->rx_bytes;
1975 }
1976 }
1977 }
1978 }
1979 for (i = 0; i < bnad->num_tx; i++) {
1980 for (j = 0; j < bnad->num_txq_per_tx; j++) {
1981 if (bnad->tx_info[i].tcb[j]) {
 1982 stats->tx_packets +=
 1983 bnad->tx_info[i].tcb[j]->txq->tx_packets;
 1984 stats->tx_bytes +=
1985 bnad->tx_info[i].tcb[j]->txq->tx_bytes;
1986 }
1987 }
1988 }
1989}
1990
1991/*
1992 * Must be called with the bna_lock held.
1993 */
1994void
 1995bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
1996{
1997 struct bfi_ll_stats_mac *mac_stats;
1998 u64 bmap;
1999 int i;
2000
2001 mac_stats = &bnad->stats.bna_stats->hw_stats->mac_stats;
250e061e 2002 stats->rx_errors =
2003 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2004 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2005 mac_stats->rx_undersize;
250e061e 2006 stats->tx_errors = mac_stats->tx_fcs_error +
8b230ed8 2007 mac_stats->tx_undersize;
2008 stats->rx_dropped = mac_stats->rx_drop;
2009 stats->tx_dropped = mac_stats->tx_drop;
2010 stats->multicast = mac_stats->rx_multicast;
2011 stats->collisions = mac_stats->tx_total_collision;
8b230ed8 2012
250e061e 2013 stats->rx_length_errors = mac_stats->rx_frame_length_error;
2014
2015 /* receive ring buffer overflow ?? */
2016
2017 stats->rx_crc_errors = mac_stats->rx_fcs_error;
2018 stats->rx_frame_errors = mac_stats->rx_alignment_error;
2019 /* recv'r fifo overrun */
2020 bmap = (u64)bnad->stats.bna_stats->rxf_bmap[0] |
2021 ((u64)bnad->stats.bna_stats->rxf_bmap[1] << 32);
2022 for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
2023 if (bmap & 1) {
250e061e 2024 stats->rx_fifo_errors +=
2025 bnad->stats.bna_stats->
2026 hw_stats->rxf_stats[i].frame_drops;
2027 break;
2028 }
2029 bmap >>= 1;
2030 }
2031}
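/*
 * The rx_fifo_errors accounting above walks the active RxF bitmap (the
 * two 32-bit words of rxf_bmap combined into one u64), least-significant
 * bit first, and folds the frame_drops counter of the first active Rx
 * function it finds into the stat before breaking out of the loop.
 */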
2032
2033static void
2034bnad_mbox_irq_sync(struct bnad *bnad)
2035{
2036 u32 irq;
2037 unsigned long flags;
2038
2039 spin_lock_irqsave(&bnad->bna_lock, flags);
2040 if (bnad->cfg_flags & BNAD_CF_MSIX)
2041 irq = bnad->msix_table[bnad->msix_num - 1].vector;
2042 else
2043 irq = bnad->pcidev->irq;
2044 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2045
2046 synchronize_irq(irq);
2047}
2048
2049/* Utility used by bnad_start_xmit, for doing TSO */
2050static int
2051bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2052{
2053 int err;
2054
 2055	/* SKB_GSO_TCPV4 and SKB_GSO_TCPV6 are defined since 2.6.18. */
2056 BUG_ON(!(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ||
2057 skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6));
2058 if (skb_header_cloned(skb)) {
2059 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2060 if (err) {
2061 BNAD_UPDATE_CTR(bnad, tso_err);
2062 return err;
2063 }
2064 }
2065
2066 /*
2067 * For TSO, the TCP checksum field is seeded with pseudo-header sum
2068 * excluding the length field.
2069 */
2070 if (skb->protocol == htons(ETH_P_IP)) {
2071 struct iphdr *iph = ip_hdr(skb);
2072
2073 /* Do we really need these? */
2074 iph->tot_len = 0;
2075 iph->check = 0;
2076
2077 tcp_hdr(skb)->check =
2078 ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2079 IPPROTO_TCP, 0);
2080 BNAD_UPDATE_CTR(bnad, tso4);
2081 } else {
2082 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2083
2084 BUG_ON(!(skb->protocol == htons(ETH_P_IPV6)));
2085 ipv6h->payload_len = 0;
2086 tcp_hdr(skb)->check =
2087 ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2088 IPPROTO_TCP, 0);
2089 BNAD_UPDATE_CTR(bnad, tso6);
2090 }
2091
2092 return 0;
2093}
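/*
 * For reference: csum_tcpudp_magic() / csum_ipv6_magic() are passed a
 * zero length above, so the seeded checksum covers only the source and
 * destination addresses plus IPPROTO_TCP.  The length term is left out
 * on purpose, since it differs for every segment the large send is later
 * cut into and is expected to be folded in per segment by the offload
 * engine.
 */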
2094
2095/*
2096 * Initialize Q numbers depending on Rx Paths
2097 * Called with bnad->bna_lock held, because of cfg_flags
2098 * access.
2099 */
2100static void
2101bnad_q_num_init(struct bnad *bnad)
2102{
2103 int rxps;
2104
2105 rxps = min((uint)num_online_cpus(),
2106 (uint)(BNAD_MAX_RXS * BNAD_MAX_RXPS_PER_RX));
2107
2108 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2109 rxps = 1; /* INTx */
2110
2111 bnad->num_rx = 1;
2112 bnad->num_tx = 1;
2113 bnad->num_rxp_per_rx = rxps;
2114 bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2115}
2116
2117/*
2118 * Adjusts the Q numbers, given a number of msix vectors
 2119 * Give preference to RSS over Tx priority queues; in that case,
 2120 * just use 1 Tx Q.
 2121 * Called with bnad->bna_lock held because of cfg_flags access.
2122 */
2123static void
2124bnad_q_num_adjust(struct bnad *bnad, int msix_vectors)
2125{
2126 bnad->num_txq_per_tx = 1;
2127 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
2128 bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2129 (bnad->cfg_flags & BNAD_CF_MSIX)) {
2130 bnad->num_rxp_per_rx = msix_vectors -
2131 (bnad->num_tx * bnad->num_txq_per_tx) -
2132 BNAD_MAILBOX_MSIX_VECTORS;
2133 } else
2134 bnad->num_rxp_per_rx = 1;
2135}
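/*
 * Worked example for the adjustment above (vector count is illustrative):
 * with num_tx = 1 and num_txq_per_tx forced to 1, a grant of 8 MSI-X
 * vectors leaves
 *	num_rxp_per_rx = 8 - (1 * 1) - BNAD_MAILBOX_MSIX_VECTORS
 * i.e. whatever remains after the single TxQ and the mailbox vectors are
 * reserved.  If the grant is too small, or MSI-X is disabled, a single
 * Rx path is used instead.
 */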
2136
2137/* Enable / disable device */
2138static void
2139bnad_device_disable(struct bnad *bnad)
2140{
2141 unsigned long flags;
2142
2143 init_completion(&bnad->bnad_completions.ioc_comp);
2144
2145 spin_lock_irqsave(&bnad->bna_lock, flags);
2146 bna_device_disable(&bnad->bna.device, BNA_HARD_CLEANUP);
2147 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2148
2149 wait_for_completion(&bnad->bnad_completions.ioc_comp);
2150}
2151
2152static int
2153bnad_device_enable(struct bnad *bnad)
2154{
2155 int err = 0;
2156 unsigned long flags;
2157
2158 init_completion(&bnad->bnad_completions.ioc_comp);
2159
2160 spin_lock_irqsave(&bnad->bna_lock, flags);
2161 bna_device_enable(&bnad->bna.device);
2162 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2163
2164 wait_for_completion(&bnad->bnad_completions.ioc_comp);
2165
2166 if (bnad->bnad_completions.ioc_comp_status)
2167 err = bnad->bnad_completions.ioc_comp_status;
2168
2169 return err;
2170}
2171
2172/* Free BNA resources */
2173static void
2174bnad_res_free(struct bnad *bnad)
2175{
2176 int i;
2177 struct bna_res_info *res_info = &bnad->res_info[0];
2178
2179 for (i = 0; i < BNA_RES_T_MAX; i++) {
2180 if (res_info[i].res_type == BNA_RES_T_MEM)
2181 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2182 else
2183 bnad_mbox_irq_free(bnad, &res_info[i].res_u.intr_info);
2184 }
2185}
2186
2187/* Allocates memory and interrupt resources for BNA */
2188static int
2189bnad_res_alloc(struct bnad *bnad)
2190{
2191 int i, err;
2192 struct bna_res_info *res_info = &bnad->res_info[0];
2193
2194 for (i = 0; i < BNA_RES_T_MAX; i++) {
2195 if (res_info[i].res_type == BNA_RES_T_MEM)
2196 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2197 else
2198 err = bnad_mbox_irq_alloc(bnad,
2199 &res_info[i].res_u.intr_info);
2200 if (err)
2201 goto err_return;
2202 }
2203 return 0;
2204
2205err_return:
2206 bnad_res_free(bnad);
2207 return err;
2208}
2209
2210/* Interrupt enable / disable */
2211static void
2212bnad_enable_msix(struct bnad *bnad)
2213{
2214 int i, ret;
2215 unsigned long flags;
2216
2217 spin_lock_irqsave(&bnad->bna_lock, flags);
2218 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2219 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2220 return;
2221 }
2222 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2223
2224 if (bnad->msix_table)
2225 return;
2226
8b230ed8 2227 bnad->msix_table =
b7ee31c5 2228 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2229
2230 if (!bnad->msix_table)
2231 goto intx_mode;
2232
b7ee31c5 2233 for (i = 0; i < bnad->msix_num; i++)
2234 bnad->msix_table[i].entry = i;
2235
b7ee31c5 2236 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
2237 if (ret > 0) {
2238 /* Not enough MSI-X vectors. */
2239
2240 spin_lock_irqsave(&bnad->bna_lock, flags);
2241 /* ret = #of vectors that we got */
2242 bnad_q_num_adjust(bnad, ret);
2243 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2244
2245 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx)
2246 + (bnad->num_rx
2247 * bnad->num_rxp_per_rx) +
2248 BNAD_MAILBOX_MSIX_VECTORS;
2249
2250 /* Try once more with adjusted numbers */
2251 /* If this fails, fall back to INTx */
2252 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
b7ee31c5 2253 bnad->msix_num);
2254 if (ret)
2255 goto intx_mode;
2256
2257 } else if (ret < 0)
2258 goto intx_mode;
2259 return;
2260
2261intx_mode:
2262
2263 kfree(bnad->msix_table);
2264 bnad->msix_table = NULL;
2265 bnad->msix_num = 0;
2266 spin_lock_irqsave(&bnad->bna_lock, flags);
2267 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2268 bnad_q_num_init(bnad);
2269 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2270}
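/*
 * A minimal helper sketch (not part of the original driver flow, shown
 * only to make the vector budget explicit): the MSI-X count used above
 * and in bnad_init() is one vector per TxQ, one per Rx path, plus
 * BNAD_MAILBOX_MSIX_VECTORS for the mailbox.
 */
static inline u32
bnad_msix_vectors_needed(struct bnad *bnad)
{
	return (bnad->num_tx * bnad->num_txq_per_tx) +
	       (bnad->num_rx * bnad->num_rxp_per_rx) +
	       BNAD_MAILBOX_MSIX_VECTORS;
}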
2271
2272static void
2273bnad_disable_msix(struct bnad *bnad)
2274{
2275 u32 cfg_flags;
2276 unsigned long flags;
2277
2278 spin_lock_irqsave(&bnad->bna_lock, flags);
2279 cfg_flags = bnad->cfg_flags;
2280 if (bnad->cfg_flags & BNAD_CF_MSIX)
2281 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2282 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2283
2284 if (cfg_flags & BNAD_CF_MSIX) {
2285 pci_disable_msix(bnad->pcidev);
2286 kfree(bnad->msix_table);
2287 bnad->msix_table = NULL;
2288 }
2289}
2290
2291/* Netdev entry points */
2292static int
2293bnad_open(struct net_device *netdev)
2294{
2295 int err;
2296 struct bnad *bnad = netdev_priv(netdev);
2297 struct bna_pause_config pause_config;
2298 int mtu;
2299 unsigned long flags;
2300
2301 mutex_lock(&bnad->conf_mutex);
2302
2303 /* Tx */
2304 err = bnad_setup_tx(bnad, 0);
2305 if (err)
2306 goto err_return;
2307
2308 /* Rx */
2309 err = bnad_setup_rx(bnad, 0);
2310 if (err)
2311 goto cleanup_tx;
2312
2313 /* Port */
2314 pause_config.tx_pause = 0;
2315 pause_config.rx_pause = 0;
2316
2317 mtu = ETH_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
2318
2319 spin_lock_irqsave(&bnad->bna_lock, flags);
2320 bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
2321 bna_port_pause_config(&bnad->bna.port, &pause_config, NULL);
2322 bna_port_enable(&bnad->bna.port);
2323 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2324
2325 /* Enable broadcast */
2326 bnad_enable_default_bcast(bnad);
2327
2328 /* Set the UCAST address */
2329 spin_lock_irqsave(&bnad->bna_lock, flags);
2330 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2331 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2332
2333 /* Start the stats timer */
2334 bnad_stats_timer_start(bnad);
2335
2336 mutex_unlock(&bnad->conf_mutex);
2337
2338 return 0;
2339
2340cleanup_tx:
2341 bnad_cleanup_tx(bnad, 0);
2342
2343err_return:
2344 mutex_unlock(&bnad->conf_mutex);
2345 return err;
2346}
2347
2348static int
2349bnad_stop(struct net_device *netdev)
2350{
2351 struct bnad *bnad = netdev_priv(netdev);
2352 unsigned long flags;
2353
2354 mutex_lock(&bnad->conf_mutex);
2355
2356 /* Stop the stats timer */
2357 bnad_stats_timer_stop(bnad);
2358
2359 init_completion(&bnad->bnad_completions.port_comp);
2360
2361 spin_lock_irqsave(&bnad->bna_lock, flags);
2362 bna_port_disable(&bnad->bna.port, BNA_HARD_CLEANUP,
2363 bnad_cb_port_disabled);
2364 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2365
2366 wait_for_completion(&bnad->bnad_completions.port_comp);
2367
2368 bnad_cleanup_tx(bnad, 0);
2369 bnad_cleanup_rx(bnad, 0);
2370
2371 /* Synchronize mailbox IRQ */
2372 bnad_mbox_irq_sync(bnad);
2373
2374 mutex_unlock(&bnad->conf_mutex);
2375
2376 return 0;
2377}
2378
2379/* TX */
2380/*
2381 * bnad_start_xmit : Netdev entry point for Transmit
2382 * Called under lock held by net_device
2383 */
2384static netdev_tx_t
2385bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2386{
2387 struct bnad *bnad = netdev_priv(netdev);
2388
2389 u16 txq_prod, vlan_tag = 0;
2390 u32 unmap_prod, wis, wis_used, wi_range;
2391 u32 vectors, vect_id, i, acked;
2392 u32 tx_id;
2393 int err;
2394
2395 struct bnad_tx_info *tx_info;
2396 struct bna_tcb *tcb;
2397 struct bnad_unmap_q *unmap_q;
2398 dma_addr_t dma_addr;
2399 struct bna_txq_entry *txqent;
2400 bna_txq_wi_ctrl_flag_t flags;
2401
2402 if (unlikely
2403 (skb->len <= ETH_HLEN || skb->len > BFI_TX_MAX_DATA_PER_PKT)) {
2404 dev_kfree_skb(skb);
2405 return NETDEV_TX_OK;
2406 }
2407
2408 tx_id = 0;
2409
2410 tx_info = &bnad->tx_info[tx_id];
2411 tcb = tx_info->tcb[tx_id];
2412 unmap_q = tcb->unmap_q;
2413
2414 /*
2415 * Takes care of the Tx that is scheduled between clearing the flag
2416 * and the netif_stop_queue() call.
2417 */
be7fa326 2418 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2419 dev_kfree_skb(skb);
2420 return NETDEV_TX_OK;
2421 }
2422
2423 vectors = 1 + skb_shinfo(skb)->nr_frags;
2424 if (vectors > BFI_TX_MAX_VECTORS_PER_PKT) {
2425 dev_kfree_skb(skb);
2426 return NETDEV_TX_OK;
2427 }
2428 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
2429 acked = 0;
2430 if (unlikely
2431 (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2432 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2433 if ((u16) (*tcb->hw_consumer_index) !=
2434 tcb->consumer_index &&
2435 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2436 acked = bnad_free_txbufs(bnad, tcb);
2437 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2438 bna_ib_ack(tcb->i_dbell, acked);
2439 smp_mb__before_clear_bit();
2440 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2441 } else {
2442 netif_stop_queue(netdev);
2443 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2444 }
2445
2446 smp_mb();
2447 /*
2448 * Check again to deal with race condition between
 2449	 * netif_stop_queue() here and netif_wake_queue() in the
 2450	 * interrupt handler, which does not run under the netif tx lock.
2451 */
2452 if (likely
2453 (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2454 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2455 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2456 return NETDEV_TX_BUSY;
2457 } else {
2458 netif_wake_queue(netdev);
2459 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2460 }
2461 }
2462
2463 unmap_prod = unmap_q->producer_index;
2464 wis_used = 1;
2465 vect_id = 0;
2466 flags = 0;
2467
2468 txq_prod = tcb->producer_index;
2469 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
2470 BUG_ON(!(wi_range <= tcb->q_depth));
2471 txqent->hdr.wi.reserved = 0;
2472 txqent->hdr.wi.num_vectors = vectors;
2473 txqent->hdr.wi.opcode =
2474 htons((skb_is_gso(skb) ? BNA_TXQ_WI_SEND_LSO :
2475 BNA_TXQ_WI_SEND));
2476
eab6d18d 2477 if (vlan_tx_tag_present(skb)) {
2478 vlan_tag = (u16) vlan_tx_tag_get(skb);
2479 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2480 }
2481 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2482 vlan_tag =
2483 (tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff);
2484 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2485 }
2486
2487 txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2488
2489 if (skb_is_gso(skb)) {
2490 err = bnad_tso_prepare(bnad, skb);
2491 if (err) {
2492 dev_kfree_skb(skb);
2493 return NETDEV_TX_OK;
2494 }
2495 txqent->hdr.wi.lso_mss = htons(skb_is_gso(skb));
2496 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2497 txqent->hdr.wi.l4_hdr_size_n_offset =
2498 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2499 (tcp_hdrlen(skb) >> 2,
2500 skb_transport_offset(skb)));
2501 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
2502 u8 proto = 0;
2503
2504 txqent->hdr.wi.lso_mss = 0;
2505
2506 if (skb->protocol == htons(ETH_P_IP))
2507 proto = ip_hdr(skb)->protocol;
2508 else if (skb->protocol == htons(ETH_P_IPV6)) {
2509 /* nexthdr may not be TCP immediately. */
2510 proto = ipv6_hdr(skb)->nexthdr;
2511 }
2512 if (proto == IPPROTO_TCP) {
2513 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2514 txqent->hdr.wi.l4_hdr_size_n_offset =
2515 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2516 (0, skb_transport_offset(skb)));
2517
2518 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2519
2520 BUG_ON(!(skb_headlen(skb) >=
2521 skb_transport_offset(skb) + tcp_hdrlen(skb)));
2522
2523 } else if (proto == IPPROTO_UDP) {
2524 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2525 txqent->hdr.wi.l4_hdr_size_n_offset =
2526 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2527 (0, skb_transport_offset(skb)));
2528
2529 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2530
2531 BUG_ON(!(skb_headlen(skb) >=
2532 skb_transport_offset(skb) +
2533 sizeof(struct udphdr)));
2534 } else {
2535 err = skb_checksum_help(skb);
2536 BNAD_UPDATE_CTR(bnad, csum_help);
2537 if (err) {
2538 dev_kfree_skb(skb);
2539 BNAD_UPDATE_CTR(bnad, csum_help_err);
2540 return NETDEV_TX_OK;
2541 }
2542 }
2543 } else {
2544 txqent->hdr.wi.lso_mss = 0;
2545 txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2546 }
2547
2548 txqent->hdr.wi.flags = htons(flags);
2549
2550 txqent->hdr.wi.frame_length = htonl(skb->len);
2551
2552 unmap_q->unmap_array[unmap_prod].skb = skb;
2553 BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
2554 txqent->vector[vect_id].length = htons(skb_headlen(skb));
2555 dma_addr = pci_map_single(bnad->pcidev, skb->data, skb_headlen(skb),
2556 PCI_DMA_TODEVICE);
2557 pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2558 dma_addr);
2559
2560 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2561 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2562
2563 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2564 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
2565 u32 size = frag->size;
2566
2567 if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
2568 vect_id = 0;
2569 if (--wi_range)
2570 txqent++;
2571 else {
2572 BNA_QE_INDX_ADD(txq_prod, wis_used,
2573 tcb->q_depth);
2574 wis_used = 0;
2575 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
2576 txqent, wi_range);
2577 BUG_ON(!(wi_range <= tcb->q_depth));
2578 }
2579 wis_used++;
2580 txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
2581 }
2582
2583 BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
2584 txqent->vector[vect_id].length = htons(size);
2585 dma_addr =
2586 pci_map_page(bnad->pcidev, frag->page,
2587 frag->page_offset, size,
2588 PCI_DMA_TODEVICE);
2589 pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2590 dma_addr);
2591 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2592 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2593 }
2594
2595 unmap_q->producer_index = unmap_prod;
2596 BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
2597 tcb->producer_index = txq_prod;
2598
2599 smp_mb();
2600
2601 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2602 return NETDEV_TX_OK;
2603
2604 bna_txq_prod_indx_doorbell(tcb);
2605
2606 if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
2607 tasklet_schedule(&bnad->tx_free_tasklet);
2608
2609 return NETDEV_TX_OK;
2610}
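/*
 * Queueing notes for bnad_start_xmit() above: a packet consumes
 * 1 + nr_frags Tx vectors (header plus fragments), each work item carries
 * a limited number of vectors, and a packet that spills past one work
 * item continues in follow-on entries flagged BNA_TXQ_WI_EXTENSION.  The
 * doorbell is rung only after the producer index has been published and
 * a final BNAD_TXQ_TX_STARTED check confirms the queue is still up.
 */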
2611
2612/*
 2613 * Uses spin_lock to synchronize reading of the stats structures, which
 2614 * are written by BNA under the same lock.
2615 */
2616static struct rtnl_link_stats64 *
2617bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
2618{
2619 struct bnad *bnad = netdev_priv(netdev);
2620 unsigned long flags;
2621
2622 spin_lock_irqsave(&bnad->bna_lock, flags);
2623
2624 bnad_netdev_qstats_fill(bnad, stats);
2625 bnad_netdev_hwstats_fill(bnad, stats);
2626
2627 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2628
250e061e 2629 return stats;
2630}
2631
2632static void
2633bnad_set_rx_mode(struct net_device *netdev)
2634{
2635 struct bnad *bnad = netdev_priv(netdev);
2636 u32 new_mask, valid_mask;
2637 unsigned long flags;
2638
2639 spin_lock_irqsave(&bnad->bna_lock, flags);
2640
2641 new_mask = valid_mask = 0;
2642
2643 if (netdev->flags & IFF_PROMISC) {
2644 if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
2645 new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2646 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2647 bnad->cfg_flags |= BNAD_CF_PROMISC;
2648 }
2649 } else {
2650 if (bnad->cfg_flags & BNAD_CF_PROMISC) {
2651 new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
2652 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2653 bnad->cfg_flags &= ~BNAD_CF_PROMISC;
2654 }
2655 }
2656
2657 if (netdev->flags & IFF_ALLMULTI) {
2658 if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
2659 new_mask |= BNA_RXMODE_ALLMULTI;
2660 valid_mask |= BNA_RXMODE_ALLMULTI;
2661 bnad->cfg_flags |= BNAD_CF_ALLMULTI;
2662 }
2663 } else {
2664 if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
2665 new_mask &= ~BNA_RXMODE_ALLMULTI;
2666 valid_mask |= BNA_RXMODE_ALLMULTI;
2667 bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
2668 }
2669 }
2670
2671 bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
2672
2673 if (!netdev_mc_empty(netdev)) {
2674 u8 *mcaddr_list;
2675 int mc_count = netdev_mc_count(netdev);
2676
2677 /* Index 0 holds the broadcast address */
2678 mcaddr_list =
2679 kzalloc((mc_count + 1) * ETH_ALEN,
2680 GFP_ATOMIC);
2681 if (!mcaddr_list)
ca1cef3a 2682 goto unlock;
2683
2684 memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
2685
2686 /* Copy rest of the MC addresses */
2687 bnad_netdev_mc_list_get(netdev, mcaddr_list);
2688
2689 bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
2690 mcaddr_list, NULL);
2691
2692 /* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
2693 kfree(mcaddr_list);
2694 }
ca1cef3a 2695unlock:
2696 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2697}
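/*
 * In bnad_set_rx_mode() above, valid_mask accumulates the Rx-mode bits
 * being changed and new_mask the values they are being changed to before
 * both are handed to bna_rx_mode_set().  The multicast list is then
 * rebuilt with the broadcast address at index 0 followed by the
 * addresses copied from the netdev.
 */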
2698
2699/*
2700 * bna_lock is used to sync writes to netdev->addr
2701 * conf_lock cannot be used since this call may be made
2702 * in a non-blocking context.
2703 */
2704static int
2705bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
2706{
2707 int err;
2708 struct bnad *bnad = netdev_priv(netdev);
2709 struct sockaddr *sa = (struct sockaddr *)mac_addr;
2710 unsigned long flags;
2711
2712 spin_lock_irqsave(&bnad->bna_lock, flags);
2713
2714 err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
2715
2716 if (!err)
2717 memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
2718
2719 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2720
2721 return err;
2722}
2723
2724static int
2725bnad_change_mtu(struct net_device *netdev, int new_mtu)
2726{
2727 int mtu, err = 0;
2728 unsigned long flags;
2729
2730 struct bnad *bnad = netdev_priv(netdev);
2731
2732 if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
2733 return -EINVAL;
2734
2735 mutex_lock(&bnad->conf_mutex);
2736
2737 netdev->mtu = new_mtu;
2738
2739 mtu = ETH_HLEN + new_mtu + ETH_FCS_LEN;
2740
2741 spin_lock_irqsave(&bnad->bna_lock, flags);
2742 bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
2743 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2744
2745 mutex_unlock(&bnad->conf_mutex);
2746 return err;
2747}
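/*
 * Example of the frame size handed to bna_port_mtu_set() above: for the
 * standard 1500 byte MTU the value is ETH_HLEN (14) + 1500 +
 * ETH_FCS_LEN (4) = 1518, i.e. the full on-wire frame length excluding
 * any VLAN tag.
 */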
2748
2749static void
2750bnad_vlan_rx_register(struct net_device *netdev,
2751 struct vlan_group *vlan_grp)
2752{
2753 struct bnad *bnad = netdev_priv(netdev);
2754
2755 mutex_lock(&bnad->conf_mutex);
2756 bnad->vlan_grp = vlan_grp;
2757 mutex_unlock(&bnad->conf_mutex);
2758}
2759
2760static void
2761bnad_vlan_rx_add_vid(struct net_device *netdev,
2762 unsigned short vid)
2763{
2764 struct bnad *bnad = netdev_priv(netdev);
2765 unsigned long flags;
2766
2767 if (!bnad->rx_info[0].rx)
2768 return;
2769
2770 mutex_lock(&bnad->conf_mutex);
2771
2772 spin_lock_irqsave(&bnad->bna_lock, flags);
2773 bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
2774 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2775
2776 mutex_unlock(&bnad->conf_mutex);
2777}
2778
2779static void
2780bnad_vlan_rx_kill_vid(struct net_device *netdev,
2781 unsigned short vid)
2782{
2783 struct bnad *bnad = netdev_priv(netdev);
2784 unsigned long flags;
2785
2786 if (!bnad->rx_info[0].rx)
2787 return;
2788
2789 mutex_lock(&bnad->conf_mutex);
2790
2791 spin_lock_irqsave(&bnad->bna_lock, flags);
2792 bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
2793 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2794
2795 mutex_unlock(&bnad->conf_mutex);
2796}
2797
2798#ifdef CONFIG_NET_POLL_CONTROLLER
2799static void
2800bnad_netpoll(struct net_device *netdev)
2801{
2802 struct bnad *bnad = netdev_priv(netdev);
2803 struct bnad_rx_info *rx_info;
2804 struct bnad_rx_ctrl *rx_ctrl;
2805 u32 curr_mask;
2806 int i, j;
2807
2808 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2809 bna_intx_disable(&bnad->bna, curr_mask);
2810 bnad_isr(bnad->pcidev->irq, netdev);
2811 bna_intx_enable(&bnad->bna, curr_mask);
2812 } else {
2813 for (i = 0; i < bnad->num_rx; i++) {
2814 rx_info = &bnad->rx_info[i];
2815 if (!rx_info->rx)
2816 continue;
2817 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2818 rx_ctrl = &rx_info->rx_ctrl[j];
2819 if (rx_ctrl->ccb) {
2820 bnad_disable_rx_irq(bnad,
2821 rx_ctrl->ccb);
2822 bnad_netif_rx_schedule_poll(bnad,
2823 rx_ctrl->ccb);
2824 }
2825 }
2826 }
2827 }
2828}
2829#endif
2830
2831static const struct net_device_ops bnad_netdev_ops = {
2832 .ndo_open = bnad_open,
2833 .ndo_stop = bnad_stop,
2834 .ndo_start_xmit = bnad_start_xmit,
250e061e 2835 .ndo_get_stats64 = bnad_get_stats64,
2836 .ndo_set_rx_mode = bnad_set_rx_mode,
2837 .ndo_set_multicast_list = bnad_set_rx_mode,
2838 .ndo_validate_addr = eth_validate_addr,
2839 .ndo_set_mac_address = bnad_set_mac_address,
2840 .ndo_change_mtu = bnad_change_mtu,
2841 .ndo_vlan_rx_register = bnad_vlan_rx_register,
2842 .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
2843 .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
2844#ifdef CONFIG_NET_POLL_CONTROLLER
2845 .ndo_poll_controller = bnad_netpoll
2846#endif
2847};
2848
2849static void
2850bnad_netdev_init(struct bnad *bnad, bool using_dac)
2851{
2852 struct net_device *netdev = bnad->netdev;
2853
2854 netdev->features |= NETIF_F_IPV6_CSUM;
2855 netdev->features |= NETIF_F_TSO;
2856 netdev->features |= NETIF_F_TSO6;
2857
2858 netdev->features |= NETIF_F_GRO;
2859 pr_warn("bna: GRO enabled, using kernel stack GRO\n");
2860
2861 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2862
2863 if (using_dac)
2864 netdev->features |= NETIF_F_HIGHDMA;
2865
2866 netdev->features |=
2867 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
2868 NETIF_F_HW_VLAN_FILTER;
2869
2870 netdev->vlan_features = netdev->features;
2871 netdev->mem_start = bnad->mmio_start;
2872 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
2873
2874 netdev->netdev_ops = &bnad_netdev_ops;
2875 bnad_set_ethtool_ops(netdev);
2876}
2877
2878/*
2879 * 1. Initialize the bnad structure
 2880 * 2. Set up the netdev pointer in pci_dev
 2881 * 3. Initialize the Tx free tasklet
2882 * 4. Initialize no. of TxQ & CQs & MSIX vectors
2883 */
2884static int
2885bnad_init(struct bnad *bnad,
2886 struct pci_dev *pdev, struct net_device *netdev)
2887{
2888 unsigned long flags;
2889
2890 SET_NETDEV_DEV(netdev, &pdev->dev);
2891 pci_set_drvdata(pdev, netdev);
2892
2893 bnad->netdev = netdev;
2894 bnad->pcidev = pdev;
2895 bnad->mmio_start = pci_resource_start(pdev, 0);
2896 bnad->mmio_len = pci_resource_len(pdev, 0);
2897 bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
2898 if (!bnad->bar0) {
2899 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
2900 pci_set_drvdata(pdev, NULL);
2901 return -ENOMEM;
2902 }
2903 pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
2904 (unsigned long long) bnad->mmio_len);
2905
2906 spin_lock_irqsave(&bnad->bna_lock, flags);
2907 if (!bnad_msix_disable)
2908 bnad->cfg_flags = BNAD_CF_MSIX;
2909
2910 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
2911
2912 bnad_q_num_init(bnad);
2913 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2914
2915 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
2916 (bnad->num_rx * bnad->num_rxp_per_rx) +
2917 BNAD_MAILBOX_MSIX_VECTORS;
2918
2919 bnad->txq_depth = BNAD_TXQ_DEPTH;
2920 bnad->rxq_depth = BNAD_RXQ_DEPTH;
2921 bnad->rx_csum = true;
2922
2923 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
2924 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
2925
2926 tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
2927 (unsigned long)bnad);
2928
2929 return 0;
2930}
2931
2932/*
2933 * Must be called after bnad_pci_uninit()
2934 * so that iounmap() and pci_set_drvdata(NULL)
 2935 * happen only after PCI uninitialization.
2936 */
2937static void
2938bnad_uninit(struct bnad *bnad)
2939{
2940 if (bnad->bar0)
2941 iounmap(bnad->bar0);
2942 pci_set_drvdata(bnad->pcidev, NULL);
2943}
2944
2945/*
2946 * Initialize locks
 2947 * a) Per-device mutex used for serializing configuration
 2948 *    changes from the OS interface
 2949 * b) Spinlock used to protect the bna state machine
2950 */
2951static void
2952bnad_lock_init(struct bnad *bnad)
2953{
2954 spin_lock_init(&bnad->bna_lock);
2955 mutex_init(&bnad->conf_mutex);
2956}
2957
2958static void
2959bnad_lock_uninit(struct bnad *bnad)
2960{
2961 mutex_destroy(&bnad->conf_mutex);
2962}
2963
2964/* PCI Initialization */
2965static int
2966bnad_pci_init(struct bnad *bnad,
2967 struct pci_dev *pdev, bool *using_dac)
2968{
2969 int err;
2970
2971 err = pci_enable_device(pdev);
2972 if (err)
2973 return err;
2974 err = pci_request_regions(pdev, BNAD_NAME);
2975 if (err)
2976 goto disable_device;
2977 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
2978 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
2979 *using_dac = 1;
2980 } else {
2981 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2982 if (err) {
2983 err = pci_set_consistent_dma_mask(pdev,
2984 DMA_BIT_MASK(32));
2985 if (err)
2986 goto release_regions;
2987 }
2988 *using_dac = 0;
2989 }
2990 pci_set_master(pdev);
2991 return 0;
2992
2993release_regions:
2994 pci_release_regions(pdev);
2995disable_device:
2996 pci_disable_device(pdev);
2997
2998 return err;
2999}
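/*
 * bnad_pci_init() above prefers a 64-bit DMA mask and reports the choice
 * through *using_dac; if the 64-bit masks cannot be set it falls back to
 * 32-bit masks before giving up and releasing the BAR regions.
 */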
3000
3001static void
3002bnad_pci_uninit(struct pci_dev *pdev)
3003{
3004 pci_release_regions(pdev);
3005 pci_disable_device(pdev);
3006}
3007
3008static int __devinit
3009bnad_pci_probe(struct pci_dev *pdev,
3010 const struct pci_device_id *pcidev_id)
3011{
3012 bool using_dac;
3013 int err;
3014 struct bnad *bnad;
3015 struct bna *bna;
3016 struct net_device *netdev;
3017 struct bfa_pcidev pcidev_info;
3018 unsigned long flags;
3019
3020 pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
3021 pdev, pcidev_id, PCI_FUNC(pdev->devfn));
3022
3023 mutex_lock(&bnad_fwimg_mutex);
3024 if (!cna_get_firmware_buf(pdev)) {
3025 mutex_unlock(&bnad_fwimg_mutex);
3026 pr_warn("Failed to load Firmware Image!\n");
3027 return -ENODEV;
3028 }
3029 mutex_unlock(&bnad_fwimg_mutex);
3030
3031 /*
 3032	 * Allocates sizeof(struct net_device) + sizeof(struct bnad);
 3033	 * bnad = netdev_priv(netdev)
3034 */
3035 netdev = alloc_etherdev(sizeof(struct bnad));
3036 if (!netdev) {
3037 dev_err(&pdev->dev, "alloc_etherdev failed\n");
3038 err = -ENOMEM;
3039 return err;
3040 }
3041 bnad = netdev_priv(netdev);
3042
3043 /*
3044 * PCI initialization
3045 * Output : using_dac = 1 for 64 bit DMA
be7fa326 3046 * = 0 for 32 bit DMA
3047 */
3048 err = bnad_pci_init(bnad, pdev, &using_dac);
3049 if (err)
3050 goto free_netdev;
3051
3052 bnad_lock_init(bnad);
3053 /*
3054 * Initialize bnad structure
3055 * Setup relation between pci_dev & netdev
3056 * Init Tx free tasklet
3057 */
3058 err = bnad_init(bnad, pdev, netdev);
3059 if (err)
3060 goto pci_uninit;
3061 /* Initialize netdev structure, set up ethtool ops */
3062 bnad_netdev_init(bnad, using_dac);
3063
3064 /* Set link to down state */
3065 netif_carrier_off(netdev);
3066
3067 bnad_enable_msix(bnad);
3068
 3069	/* Get resource requirement from bna */
3070 bna_res_req(&bnad->res_info[0]);
3071
3072 /* Allocate resources from bna */
3073 err = bnad_res_alloc(bnad);
3074 if (err)
3075 goto free_netdev;
3076
3077 bna = &bnad->bna;
3078
3079 /* Setup pcidev_info for bna_init() */
3080 pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3081 pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3082 pcidev_info.device_id = bnad->pcidev->device;
3083 pcidev_info.pci_bar_kva = bnad->bar0;
3084
3085 mutex_lock(&bnad->conf_mutex);
3086
3087 spin_lock_irqsave(&bnad->bna_lock, flags);
3088 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3089 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3090
3091 bnad->stats.bna_stats = &bna->stats;
3092
3093 /* Set up timers */
3094 setup_timer(&bnad->bna.device.ioc.ioc_timer, bnad_ioc_timeout,
3095 ((unsigned long)bnad));
3096 setup_timer(&bnad->bna.device.ioc.hb_timer, bnad_ioc_hb_check,
3097 ((unsigned long)bnad));
3098 setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_ioc_sem_timeout,
3099 ((unsigned long)bnad));
3100
3101 /* Now start the timer before calling IOC */
3102 mod_timer(&bnad->bna.device.ioc.ioc_timer,
3103 jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
3104
3105 /*
3106 * Start the chip
 3107	 * Don't care even if err != 0; the bna state machine will
 3108	 * deal with it.
3109 */
3110 err = bnad_device_enable(bnad);
3111
3112 /* Get the burnt-in mac */
3113 spin_lock_irqsave(&bnad->bna_lock, flags);
3114 bna_port_mac_get(&bna->port, &bnad->perm_addr);
3115 bnad_set_netdev_perm_addr(bnad);
3116 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3117
3118 mutex_unlock(&bnad->conf_mutex);
3119
 3120	/* Finally, register with the net_device layer */
3121 err = register_netdev(netdev);
3122 if (err) {
3123 pr_err("BNA : Registering with netdev failed\n");
3124 goto disable_device;
3125 }
3126
3127 return 0;
3128
3129disable_device:
3130 mutex_lock(&bnad->conf_mutex);
3131 bnad_device_disable(bnad);
3132 del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
3133 del_timer_sync(&bnad->bna.device.ioc.sem_timer);
3134 del_timer_sync(&bnad->bna.device.ioc.hb_timer);
3135 spin_lock_irqsave(&bnad->bna_lock, flags);
3136 bna_uninit(bna);
3137 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3138 mutex_unlock(&bnad->conf_mutex);
3139
3140 bnad_res_free(bnad);
3141 bnad_disable_msix(bnad);
3142pci_uninit:
3143 bnad_pci_uninit(pdev);
3144 bnad_lock_uninit(bnad);
3145 bnad_uninit(bnad);
3146free_netdev:
3147 free_netdev(netdev);
3148 return err;
3149}
3150
3151static void __devexit
3152bnad_pci_remove(struct pci_dev *pdev)
3153{
3154 struct net_device *netdev = pci_get_drvdata(pdev);
3155 struct bnad *bnad;
3156 struct bna *bna;
3157 unsigned long flags;
3158
3159 if (!netdev)
3160 return;
3161
3162 pr_info("%s bnad_pci_remove\n", netdev->name);
3163 bnad = netdev_priv(netdev);
3164 bna = &bnad->bna;
3165
3166 unregister_netdev(netdev);
3167
3168 mutex_lock(&bnad->conf_mutex);
3169 bnad_device_disable(bnad);
3170 del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
3171 del_timer_sync(&bnad->bna.device.ioc.sem_timer);
3172 del_timer_sync(&bnad->bna.device.ioc.hb_timer);
3173 spin_lock_irqsave(&bnad->bna_lock, flags);
3174 bna_uninit(bna);
3175 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3176 mutex_unlock(&bnad->conf_mutex);
3177
3178 bnad_res_free(bnad);
3179 bnad_disable_msix(bnad);
3180 bnad_pci_uninit(pdev);
3181 bnad_lock_uninit(bnad);
3182 bnad_uninit(bnad);
3183 free_netdev(netdev);
3184}
3185
b7ee31c5 3186static const struct pci_device_id bnad_pci_id_table[] = {
3187 {
3188 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3189 PCI_DEVICE_ID_BROCADE_CT),
3190 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3191 .class_mask = 0xffff00
3192 }, {0, }
3193};
3194
3195MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3196
3197static struct pci_driver bnad_pci_driver = {
3198 .name = BNAD_NAME,
3199 .id_table = bnad_pci_id_table,
3200 .probe = bnad_pci_probe,
3201 .remove = __devexit_p(bnad_pci_remove),
3202};
3203
3204static int __init
3205bnad_module_init(void)
3206{
3207 int err;
3208
3209 pr_info("Brocade 10G Ethernet driver\n");
3210
8a891429 3211 bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3212
3213 err = pci_register_driver(&bnad_pci_driver);
3214 if (err < 0) {
3215 pr_err("bna : PCI registration failed in module init "
3216 "(%d)\n", err);
3217 return err;
3218 }
3219
3220 return 0;
3221}
3222
3223static void __exit
3224bnad_module_exit(void)
3225{
3226 pci_unregister_driver(&bnad_pci_driver);
3227
3228 if (bfi_fw)
3229 release_firmware(bfi_fw);
3230}
3231
3232module_init(bnad_module_init);
3233module_exit(bnad_module_exit);
3234
3235MODULE_AUTHOR("Brocade");
3236MODULE_LICENSE("GPL");
3237MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
3238MODULE_VERSION(BNAD_VERSION);
3239MODULE_FIRMWARE(CNA_FW_FILE_CT);