/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */
#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/prefetch.h>

#include "bnad.h"
#include "bna.h"
#include "cna.h"

static DEFINE_MUTEX(bnad_fwimg_mutex);

/*
 * Module params
 */
static uint bnad_msix_disable;
module_param(bnad_msix_disable, uint, 0444);
MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");

static uint bnad_ioc_auto_recover = 1;
module_param(bnad_ioc_auto_recover, uint, 0444);
MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");

/*
 * Global variables
 */
u32 bnad_rxqs_per_cq = 2;

static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

/*
 * Local MACROS
 */
#define BNAD_TX_UNMAPQ_DEPTH	(bnad->txq_depth * 2)

#define BNAD_RX_UNMAPQ_DEPTH	(bnad->rxq_depth)

#define BNAD_GET_MBOX_IRQ(_bnad)				\
	(((_bnad)->cfg_flags & BNAD_CF_MSIX) ?			\
	 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
	 ((_bnad)->pcidev->irq))

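/*
 * Fill a BNA_RES_T_MEM/BNA_MEM_T_KVA resource request sized for an
 * unmap queue of (_depth) entries: sizeof(struct bnad_unmap_q) plus
 * (_depth - 1) extra struct bnad_skb_unmap slots, the first slot
 * presumably being embedded in struct bnad_unmap_q itself.
 */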
#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth)	\
do {								\
	(_res_info)->res_type = BNA_RES_T_MEM;			\
	(_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;	\
	(_res_info)->res_u.mem_info.num = (_num);		\
	(_res_info)->res_u.mem_info.len =			\
	sizeof(struct bnad_unmap_q) +				\
	(sizeof(struct bnad_skb_unmap) * ((_depth) - 1));	\
} while (0)

#define BNAD_TXRX_SYNC_MDELAY	250	/* 250 msecs */

/*
 * Reinitialize completions in CQ, once Rx is taken down
 */
static void
bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bna_cq_entry *cmpl, *next_cmpl;
	unsigned int wi_range, wis = 0, ccb_prod = 0;
	int i;

	BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
			    wi_range);

	for (i = 0; i < ccb->q_depth; i++) {
		wis++;
		if (likely(--wi_range))
			next_cmpl = cmpl + 1;
		else {
			BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
			wis = 0;
			BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
					    next_cmpl, wi_range);
		}
		cmpl->valid = 0;
		cmpl = next_cmpl;
	}
}

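/*
 * Unmap the DMA mappings of an skb's linear data and of each page
 * fragment, clearing the stored addresses in the unmap array as it
 * goes. Returns the unmap-queue index following the last entry used.
 */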
static u32
bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array,
		  u32 index, u32 depth, struct sk_buff *skb, u32 frag)
{
	int j;
	array[index].skb = NULL;

	dma_unmap_single(pdev, dma_unmap_addr(&array[index], dma_addr),
			skb_headlen(skb), DMA_TO_DEVICE);
	dma_unmap_addr_set(&array[index], dma_addr, 0);
	BNA_QE_INDX_ADD(index, 1, depth);

	for (j = 0; j < frag; j++) {
		dma_unmap_page(pdev, dma_unmap_addr(&array[index], dma_addr),
			  skb_shinfo(skb)->frags[j].size, DMA_TO_DEVICE);
		dma_unmap_addr_set(&array[index], dma_addr, 0);
		BNA_QE_INDX_ADD(index, 1, depth);
	}

	return index;
}

/*
 * Frees all pending Tx Bufs
 * At this point no activity is expected on the Q,
 * so DMA unmap & freeing is fine.
 */
static void
bnad_free_all_txbufs(struct bnad *bnad,
		 struct bna_tcb *tcb)
{
	u32 unmap_cons;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb = NULL;
	int q;

	unmap_array = unmap_q->unmap_array;

	for (q = 0; q < unmap_q->q_depth; q++) {
		skb = unmap_array[q].skb;
		if (!skb)
			continue;

		unmap_cons = q;
		unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
				unmap_cons, unmap_q->q_depth, skb,
				skb_shinfo(skb)->nr_frags);

		dev_kfree_skb_any(skb);
	}
}

/* Data Path Handlers */

/*
 * bnad_free_txbufs : Frees the Tx bufs on Tx completion
 * Can be called in a) Interrupt context
 * b) Sending context
 * c) Tasklet context
 */
static u32
bnad_free_txbufs(struct bnad *bnad,
		 struct bna_tcb *tcb)
{
	u32 unmap_cons, sent_packets = 0, sent_bytes = 0;
	u16 wis, updated_hw_cons;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb;

	/*
	 * Just return if TX is stopped. This check is useful
	 * when bnad_free_txbufs() runs from a tasklet that was scheduled
	 * before bnad_cb_tx_cleanup() cleared the BNAD_TXQ_TX_STARTED bit,
	 * but actually executes after the cleanup has been done.
	 */
	if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
		return 0;

	updated_hw_cons = *(tcb->hw_consumer_index);

	wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
				 updated_hw_cons, tcb->q_depth);

	BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));

	unmap_array = unmap_q->unmap_array;
	unmap_cons = unmap_q->consumer_index;

	prefetch(&unmap_array[unmap_cons + 1]);
	while (wis) {
		skb = unmap_array[unmap_cons].skb;

		sent_packets++;
		sent_bytes += skb->len;
		wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);

		unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
				unmap_cons, unmap_q->q_depth, skb,
				skb_shinfo(skb)->nr_frags);

		dev_kfree_skb_any(skb);
	}

	/* Update consumer pointers. */
	tcb->consumer_index = updated_hw_cons;
	unmap_q->consumer_index = unmap_cons;

	tcb->txq->tx_packets += sent_packets;
	tcb->txq->tx_bytes += sent_bytes;

	return sent_packets;
}

/* Tx Free Tasklet function */
/* Frees for all the tcb's in all the Tx's */
/*
 * Scheduled from sending context, so that
 * the fat Tx lock is not held for too long
 * in the sending context.
 */
static void
bnad_tx_free_tasklet(unsigned long bnad_ptr)
{
	struct bnad *bnad = (struct bnad *)bnad_ptr;
	struct bna_tcb *tcb;
	u32 acked = 0;
	int i, j;

	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			tcb = bnad->tx_info[i].tcb[j];
			if (!tcb)
				continue;
			if (((u16) (*tcb->hw_consumer_index) !=
				tcb->consumer_index) &&
				(!test_and_set_bit(BNAD_TXQ_FREE_SENT,
						   &tcb->flags))) {
				acked = bnad_free_txbufs(bnad, tcb);
				if (likely(test_bit(BNAD_TXQ_TX_STARTED,
					&tcb->flags)))
					bna_ib_ack(tcb->i_dbell, acked);
				smp_mb__before_clear_bit();
				clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
			}
			if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED,
						&tcb->flags)))
				continue;
			if (netif_queue_stopped(bnad->netdev)) {
				if (acked && netif_carrier_ok(bnad->netdev) &&
					BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
						BNAD_NETIF_WAKE_THRESHOLD) {
					netif_wake_queue(bnad->netdev);
					/* TODO */
					/* Counters for individual TxQs? */
					BNAD_UPDATE_CTR(bnad,
						netif_queue_wakeup);
				}
			}
		}
	}
}

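/*
 * Tx completion processing for one TxQ: free the transmitted buffers,
 * wake the netdev queue once enough descriptors are free again and the
 * queue is still started, then ack the IB with the completion count.
 */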
static u32
bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct net_device *netdev = bnad->netdev;
	u32 sent = 0;

	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		return 0;

	sent = bnad_free_txbufs(bnad, tcb);
	if (sent) {
		if (netif_queue_stopped(netdev) &&
		    netif_carrier_ok(netdev) &&
		    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
				    BNAD_NETIF_WAKE_THRESHOLD) {
			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
				netif_wake_queue(netdev);
				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
			}
		}
	}

	if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
		bna_ib_ack(tcb->i_dbell, sent);

	smp_mb__before_clear_bit();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

	return sent;
}

/* MSIX Tx Completion Handler */
static irqreturn_t
bnad_msix_tx(int irq, void *data)
{
	struct bna_tcb *tcb = (struct bna_tcb *)data;
	struct bnad *bnad = tcb->bnad;

	bnad_tx(bnad, tcb);

	return IRQ_HANDLED;
}

static void
bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	rcb->producer_index = 0;
	rcb->consumer_index = 0;

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
}

static void
bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb;
	int unmap_cons;

	unmap_q = rcb->unmap_q;
	unmap_array = unmap_q->unmap_array;
	for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
		skb = unmap_array[unmap_cons].skb;
		if (!skb)
			continue;
		unmap_array[unmap_cons].skb = NULL;
		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr),
				 rcb->rxq->buffer_size,
				 DMA_FROM_DEVICE);
		dev_kfree_skb(skb);
	}
	bnad_reset_rcb(bnad, rcb);
}

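/*
 * Allocate skbs for the free slots of an RxQ, DMA-map them and write
 * the mapped addresses into the RxQ entries. The producer index is
 * published (and the doorbell rung) only if at least one buffer was
 * posted and posting is currently allowed for this RxQ.
 */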
static void
bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
{
	u16 to_alloc, alloced, unmap_prod, wi_range;
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct bna_rxq_entry *rxent;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	alloced = 0;
	to_alloc =
		BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);

	unmap_array = unmap_q->unmap_array;
	unmap_prod = unmap_q->producer_index;

	BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);

	while (to_alloc--) {
		if (!wi_range)
			BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
					     wi_range);
		skb = netdev_alloc_skb_ip_align(bnad->netdev,
						rcb->rxq->buffer_size);
		if (unlikely(!skb)) {
			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
			rcb->rxq->rxbuf_alloc_failed++;
			goto finishing;
		}
		unmap_array[unmap_prod].skb = skb;
		dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
					  rcb->rxq->buffer_size,
					  DMA_FROM_DEVICE);
		dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
				   dma_addr);
		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);

		rxent++;
		wi_range--;
		alloced++;
	}

finishing:
	if (likely(alloced)) {
		unmap_q->producer_index = unmap_prod;
		rcb->producer_index = unmap_prod;
		smp_mb();
		if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
			bna_rxq_prod_indx_doorbell(rcb);
	}
}

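/*
 * Refill an RxQ when enough entries are free, using the BNAD_RXQ_REFILL
 * bit so that only one context refills the queue at a time.
 */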
static inline void
bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
		if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
			 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
			bnad_alloc_n_post_rxbufs(bnad, rcb);
		smp_mb__before_clear_bit();
		clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
	}
}

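/*
 * Rx completion processing (NAPI poll body): walk the CQ up to the
 * budget, unmap each frame and hand it to the stack with checksum,
 * VLAN and GRO handling, then ack the IB and refill the RxQ(s).
 */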
static u32
bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
{
	struct bna_cq_entry *cmpl, *next_cmpl;
	struct bna_rcb *rcb = NULL;
	unsigned int wi_range, packets = 0, wis = 0;
	struct bnad_unmap_q *unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb;
	u32 flags, unmap_cons;
	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);

	set_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);

	if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)) {
		clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
		return 0;
	}

	prefetch(bnad->netdev);
	BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
			    wi_range);
	BUG_ON(!(wi_range <= ccb->q_depth));
	while (cmpl->valid && packets < budget) {
		packets++;
		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));

		if (bna_is_small_rxq(cmpl->rxq_id))
			rcb = ccb->rcb[1];
		else
			rcb = ccb->rcb[0];

		unmap_q = rcb->unmap_q;
		unmap_array = unmap_q->unmap_array;
		unmap_cons = unmap_q->consumer_index;

		skb = unmap_array[unmap_cons].skb;
		BUG_ON(!(skb));
		unmap_array[unmap_cons].skb = NULL;
		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr),
				 rcb->rxq->buffer_size,
				 DMA_FROM_DEVICE);
		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);

		/* Should be more efficient ? Performance ? */
		BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);

		wis++;
		if (likely(--wi_range))
			next_cmpl = cmpl + 1;
		else {
			BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
			wis = 0;
			BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
						next_cmpl, wi_range);
			BUG_ON(!(wi_range <= ccb->q_depth));
		}
		prefetch(next_cmpl);

		flags = ntohl(cmpl->flags);
		if (unlikely
		    (flags &
		     (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
		      BNA_CQ_EF_TOO_LONG))) {
			dev_kfree_skb_any(skb);
			rcb->rxq->rx_packets_with_error++;
			goto next;
		}

		skb_put(skb, ntohs(cmpl->length));
		if (likely
		    ((bnad->netdev->features & NETIF_F_RXCSUM) &&
		     (((flags & BNA_CQ_EF_IPV4) &&
		      (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
		      (flags & BNA_CQ_EF_IPV6)) &&
		      (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
		      (flags & BNA_CQ_EF_L4_CKSUM_OK)))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		rcb->rxq->rx_packets++;
		rcb->rxq->rx_bytes += skb->len;
		skb->protocol = eth_type_trans(skb, bnad->netdev);

		if (flags & BNA_CQ_EF_VLAN)
			__vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag));

		if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			napi_gro_receive(&rx_ctrl->napi, skb);
		else {
			netif_receive_skb(skb);
		}

next:
		cmpl->valid = 0;
		cmpl = next_cmpl;
	}

	BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);

	if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
		bna_ib_ack_disable_irq(ccb->i_dbell, packets);

	bnad_refill_rxq(bnad, ccb->rcb[0]);
	if (ccb->rcb[1])
		bnad_refill_rxq(bnad, ccb->rcb[1]);

	clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);

	return packets;
}

static void
bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
	struct napi_struct *napi = &rx_ctrl->napi;

	if (likely(napi_schedule_prep(napi))) {
		__napi_schedule(napi);
		rx_ctrl->rx_schedule++;
	}
}

/* MSIX Rx Path Handler */
static irqreturn_t
bnad_msix_rx(int irq, void *data)
{
	struct bna_ccb *ccb = (struct bna_ccb *)data;

	if (ccb) {
		((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++;
		bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
	}

	return IRQ_HANDLED;
}

/* Interrupt handlers */

/* Mbox Interrupt Handlers */
static irqreturn_t
bnad_msix_mbox_handler(int irq, void *data)
{
	u32 intr_status;
	unsigned long flags;
	struct bnad *bnad = (struct bnad *)data;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_HANDLED;
	}

	bna_intr_status_get(&bnad->bna, intr_status);

	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return IRQ_HANDLED;
}

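/*
 * INTx interrupt handler: services mailbox events and, if a data
 * interrupt is pending, runs Tx completions directly and schedules
 * NAPI for every active Rx path.
 */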
static irqreturn_t
bnad_isr(int irq, void *data)
{
	int i, j;
	u32 intr_status;
	unsigned long flags;
	struct bnad *bnad = (struct bnad *)data;
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	struct bna_tcb *tcb = NULL;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_NONE;
	}

	bna_intr_status_get(&bnad->bna, intr_status);

	if (unlikely(!intr_status)) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_NONE;
	}

	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (!BNA_IS_INTX_DATA_INTR(intr_status))
		return IRQ_HANDLED;

	/* Process data interrupts */
	/* Tx processing */
	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			tcb = bnad->tx_info[i].tcb[j];
			if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
				bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
		}
	}
	/* Rx processing */
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			if (rx_ctrl->ccb)
				bnad_netif_rx_schedule_poll(bnad,
							    rx_ctrl->ccb);
		}
	}
	return IRQ_HANDLED;
}

/*
 * Called in interrupt / callback context
 * with bna_lock held, so cfg_flags access is OK
 */
static void
bnad_enable_mbox_irq(struct bnad *bnad)
{
	clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
}

/*
 * Called with bnad->bna_lock held because of
 * bnad->cfg_flags access.
 */
static void
bnad_disable_mbox_irq(struct bnad *bnad)
{
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
}

static void
bnad_set_netdev_perm_addr(struct bnad *bnad)
{
	struct net_device *netdev = bnad->netdev;

	memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
	if (is_zero_ether_addr(netdev->dev_addr))
		memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
}

/* Control Path Handlers */

/* Callbacks */
void
bnad_cb_mbox_intr_enable(struct bnad *bnad)
{
	bnad_enable_mbox_irq(bnad);
}

void
bnad_cb_mbox_intr_disable(struct bnad *bnad)
{
	bnad_disable_mbox_irq(bnad);
}

void
bnad_cb_ioceth_ready(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.ioc_comp);
}

void
bnad_cb_ioceth_failed(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
	complete(&bnad->bnad_completions.ioc_comp);
}

void
bnad_cb_ioceth_disabled(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.ioc_comp);
}

static void
bnad_cb_enet_disabled(void *arg)
{
	struct bnad *bnad = (struct bnad *)arg;

	netif_carrier_off(bnad->netdev);
	complete(&bnad->bnad_completions.enet_comp);
}

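/*
 * Link state callback: update the netdev carrier state and, on link up,
 * wake or stop each Tx subqueue depending on whether its TXQ is started.
 */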
void
bnad_cb_ethport_link_status(struct bnad *bnad,
			enum bna_link_status link_status)
{
	bool link_up = 0;

	link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);

	if (link_status == BNA_CEE_UP) {
		if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
			BNAD_UPDATE_CTR(bnad, cee_toggle);
		set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
	} else {
		if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
			BNAD_UPDATE_CTR(bnad, cee_toggle);
		clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
	}

	if (link_up) {
		if (!netif_carrier_ok(bnad->netdev)) {
			uint tx_id, tcb_id;
			printk(KERN_WARNING "bna: %s link up\n",
				bnad->netdev->name);
			netif_carrier_on(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
			for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
				for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
				      tcb_id++) {
					struct bna_tcb *tcb =
					bnad->tx_info[tx_id].tcb[tcb_id];
					u32 txq_id;
					if (!tcb)
						continue;

					txq_id = tcb->id;

					if (test_bit(BNAD_TXQ_TX_STARTED,
						     &tcb->flags)) {
						/*
						 * Force an immediate
						 * Transmit Schedule
						 */
						printk(KERN_INFO "bna: %s %d "
						      "TXQ_STARTED\n",
						       bnad->netdev->name,
						       txq_id);
						netif_wake_subqueue(
								bnad->netdev,
								txq_id);
						BNAD_UPDATE_CTR(bnad,
							netif_queue_wakeup);
					} else {
						netif_stop_subqueue(
								bnad->netdev,
								txq_id);
						BNAD_UPDATE_CTR(bnad,
							netif_queue_stop);
					}
				}
			}
		}
	} else {
		if (netif_carrier_ok(bnad->netdev)) {
			printk(KERN_WARNING "bna: %s link down\n",
				bnad->netdev->name);
			netif_carrier_off(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
		}
	}
}

static void
bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.tx_comp);
}

static void
bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;

	tx_info->tcb[tcb->id] = tcb;
	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
	unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
}

static void
bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;

	while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		cpu_relax();

	bnad_free_all_txbufs(bnad, tcb);

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;

	smp_mb__before_clear_bit();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

	tx_info->tcb[tcb->id] = NULL;
}

static void
bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
	unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
}

static void
bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
{
	bnad_free_all_rxbufs(bnad, rcb);
}

static void
bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_info *rx_info =
			(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = ccb;
	ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
}

static void
bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_info *rx_info =
			(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = NULL;
}

static void
bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tx->priv;
	struct bna_tcb *tcb;
	u32 txq_id;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
		txq_id = tcb->id;
		clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
		netif_stop_subqueue(bnad->netdev, txq_id);
		printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n",
			bnad->netdev->name, txq_id);
	}
}

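/*
 * Tx resume callback: drop any stale buffers still held by each TXQ,
 * reset the unmap queue indices, mark the TXQ started and wake its
 * netdev subqueue if the link is up.
 */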
static void
bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
	struct bna_tcb *tcb;
	struct bnad_unmap_q *unmap_q;
	u32 txq_id;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
		txq_id = tcb->id;

		unmap_q = tcb->unmap_q;

		if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
			continue;

		while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
			cpu_relax();

		bnad_free_all_txbufs(bnad, tcb);

		unmap_q->producer_index = 0;
		unmap_q->consumer_index = 0;

		smp_mb__before_clear_bit();
		clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

		set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);

		if (netif_carrier_ok(bnad->netdev)) {
			printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
				bnad->netdev->name, txq_id);
			netif_wake_subqueue(bnad->netdev, txq_id);
			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
		}
	}

	/*
	 * Workaround for the first ioceth enable failure, which leaves us
	 * with a zero MAC address. We try to get the MAC address again here.
	 */
	if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
		bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr);
		bnad_set_netdev_perm_addr(bnad);
	}
}

static void
bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
	struct bna_tcb *tcb;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
	}

	mdelay(BNAD_TXRX_SYNC_MDELAY);
	bna_tx_cleanup_complete(tx);
}

static void
bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
{
	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
	struct bna_ccb *ccb;
	struct bnad_rx_ctrl *rx_ctrl;
	int i;

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];
		ccb = rx_ctrl->ccb;
		if (!ccb)
			continue;

		clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);

		if (ccb->rcb[1])
			clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
	}
}

static void
bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
{
	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
	struct bna_ccb *ccb;
	struct bnad_rx_ctrl *rx_ctrl;
	int i;

	mdelay(BNAD_TXRX_SYNC_MDELAY);

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];
		ccb = rx_ctrl->ccb;
		if (!ccb)
			continue;

		clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);

		if (ccb->rcb[1])
			clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);

		while (test_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags))
			cpu_relax();
	}

	bna_rx_cleanup_complete(rx);
}

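/*
 * Rx post callback: reinitialize the CQ completions, drop any leftover
 * buffers on each RCB, mark the RxQs started and postable, and refill
 * them with fresh receive buffers.
 */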
static void
bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
{
	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
	struct bna_ccb *ccb;
	struct bna_rcb *rcb;
	struct bnad_rx_ctrl *rx_ctrl;
	struct bnad_unmap_q *unmap_q;
	int i;
	int j;

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];
		ccb = rx_ctrl->ccb;
		if (!ccb)
			continue;

		bnad_cq_cmpl_init(bnad, ccb);

		for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
			rcb = ccb->rcb[j];
			if (!rcb)
				continue;
			bnad_free_all_rxbufs(bnad, rcb);

			set_bit(BNAD_RXQ_STARTED, &rcb->flags);
			set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
			unmap_q = rcb->unmap_q;

			/* Now allocate & post buffers for this RCB */
			/* !!Allocation in callback context */
			if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
				if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
					>> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
					bnad_alloc_n_post_rxbufs(bnad, rcb);
				smp_mb__before_clear_bit();
				clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
			}
		}
	}
}

static void
bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.rx_comp);
}

static void
bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
{
	bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.mcast_comp);
}

void
bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
		       struct bna_stats *stats)
{
	if (status == BNA_CB_SUCCESS)
		BNAD_UPDATE_CTR(bnad, hw_stats_updates);

	if (!netif_running(bnad->netdev) ||
		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		return;

	mod_timer(&bnad->stats_timer,
		  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
}

static void
bnad_cb_enet_mtu_set(struct bnad *bnad)
{
	bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.mtu_comp);
}

/* Resource allocation, free functions */

static void
bnad_mem_free(struct bnad *bnad,
	      struct bna_mem_info *mem_info)
{
	int i;
	dma_addr_t dma_pa;

	if (mem_info->mdl == NULL)
		return;

	for (i = 0; i < mem_info->num; i++) {
		if (mem_info->mdl[i].kva != NULL) {
			if (mem_info->mem_type == BNA_MEM_T_DMA) {
				BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
						dma_pa);
				dma_free_coherent(&bnad->pcidev->dev,
						  mem_info->mdl[i].len,
						  mem_info->mdl[i].kva, dma_pa);
			} else
				kfree(mem_info->mdl[i].kva);
		}
	}
	kfree(mem_info->mdl);
	mem_info->mdl = NULL;
}

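/*
 * Allocate the memory descriptor list for a resource request: one
 * DMA-coherent or kzalloc'd block of mem_info->len bytes per
 * descriptor. On failure, everything allocated so far is freed.
 */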
static int
bnad_mem_alloc(struct bnad *bnad,
	       struct bna_mem_info *mem_info)
{
	int i;
	dma_addr_t dma_pa;

	if ((mem_info->num == 0) || (mem_info->len == 0)) {
		mem_info->mdl = NULL;
		return 0;
	}

	mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
				GFP_KERNEL);
	if (mem_info->mdl == NULL)
		return -ENOMEM;

	if (mem_info->mem_type == BNA_MEM_T_DMA) {
		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva =
				dma_alloc_coherent(&bnad->pcidev->dev,
						mem_info->len, &dma_pa,
						GFP_KERNEL);

			if (mem_info->mdl[i].kva == NULL)
				goto err_return;

			BNA_SET_DMA_ADDR(dma_pa,
					 &(mem_info->mdl[i].dma));
		}
	} else {
		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva = kzalloc(mem_info->len,
							GFP_KERNEL);
			if (mem_info->mdl[i].kva == NULL)
				goto err_return;
		}
	}

	return 0;

err_return:
	bnad_mem_free(bnad, mem_info);
	return -ENOMEM;
}

/* Free IRQ for Mailbox */
static void
bnad_mbox_irq_free(struct bnad *bnad)
{
	int irq;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad_disable_mbox_irq(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	irq = BNAD_GET_MBOX_IRQ(bnad);
	free_irq(irq, bnad);
}

/*
 * Allocates IRQ for Mailbox, but keeps it disabled.
 * It will be enabled once we get the mbox enable callback
 * from bna.
 */
static int
bnad_mbox_irq_alloc(struct bnad *bnad)
{
	int err = 0;
	unsigned long irq_flags, flags;
	u32 irq;
	irq_handler_t irq_handler;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bnad->cfg_flags & BNAD_CF_MSIX) {
		irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
		irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
		irq_flags = 0;
	} else {
		irq_handler = (irq_handler_t)bnad_isr;
		irq = bnad->pcidev->irq;
		irq_flags = IRQF_SHARED;
	}

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);

	/*
	 * Set the Mbox IRQ disable flag, so that the IRQ handler
	 * called from request_irq() for SHARED IRQs does not execute
	 */
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);

	err = request_irq(irq, irq_handler, irq_flags,
			  bnad->mbox_irq_name, bnad);

	return err;
}

static void
bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
{
	kfree(intr_info->idl);
	intr_info->idl = NULL;
}

/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
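/*
 * MSI-X vector layout: the mailbox vector(s) come first, followed by
 * one vector per TxQ across all Tx objects, then one per Rx path.
 */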
static int
bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
		    u32 txrx_id, struct bna_intr_info *intr_info)
{
	int i, vector_start = 0;
	u32 cfg_flags;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	cfg_flags = bnad->cfg_flags;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (cfg_flags & BNAD_CF_MSIX) {
		intr_info->intr_type = BNA_INTR_T_MSIX;
		intr_info->idl = kcalloc(intr_info->num,
					sizeof(struct bna_intr_descr),
					GFP_KERNEL);
		if (!intr_info->idl)
			return -ENOMEM;

		switch (src) {
		case BNAD_INTR_TX:
			vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
			break;

		case BNAD_INTR_RX:
			vector_start = BNAD_MAILBOX_MSIX_VECTORS +
					(bnad->num_tx * bnad->num_txq_per_tx) +
					txrx_id;
			break;

		default:
			BUG();
		}

		for (i = 0; i < intr_info->num; i++)
			intr_info->idl[i].vector = vector_start + i;
	} else {
		intr_info->intr_type = BNA_INTR_T_INTX;
		intr_info->num = 1;
		intr_info->idl = kcalloc(intr_info->num,
					sizeof(struct bna_intr_descr),
					GFP_KERNEL);
		if (!intr_info->idl)
			return -ENOMEM;

		switch (src) {
		case BNAD_INTR_TX:
			intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
			break;

		case BNAD_INTR_RX:
			intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
			break;
		}
	}
	return 0;
}

/**
 * NOTE: Should be called for MSIX only
 * Unregisters Tx MSIX vector(s) from the kernel
 */
static void
bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
			int num_txqs)
{
	int i;
	int vector_num;

	for (i = 0; i < num_txqs; i++) {
		if (tx_info->tcb[i] == NULL)
			continue;

		vector_num = tx_info->tcb[i]->intr_vector;
		free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
	}
}

/**
 * NOTE: Should be called for MSIX only
 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
			u32 tx_id, int num_txqs)
{
	int i;
	int err;
	int vector_num;

	for (i = 0; i < num_txqs; i++) {
		vector_num = tx_info->tcb[i]->intr_vector;
		sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
				tx_id + tx_info->tcb[i]->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_tx, 0,
				  tx_info->tcb[i]->name,
				  tx_info->tcb[i]);
		if (err)
			goto err_return;
	}

	return 0;

err_return:
	if (i > 0)
		bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
	return -1;
}

/**
 * NOTE: Should be called for MSIX only
 * Unregisters Rx MSIX vector(s) from the kernel
 */
static void
bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
			int num_rxps)
{
	int i;
	int vector_num;

	for (i = 0; i < num_rxps; i++) {
		if (rx_info->rx_ctrl[i].ccb == NULL)
			continue;

		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
		free_irq(bnad->msix_table[vector_num].vector,
			 rx_info->rx_ctrl[i].ccb);
	}
}

/**
 * NOTE: Should be called for MSIX only
 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
			u32 rx_id, int num_rxps)
{
	int i;
	int err;
	int vector_num;

	for (i = 0; i < num_rxps; i++) {
		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
		sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
			bnad->netdev->name,
			rx_id + rx_info->rx_ctrl[i].ccb->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_rx, 0,
				  rx_info->rx_ctrl[i].ccb->name,
				  rx_info->rx_ctrl[i].ccb);
		if (err)
			goto err_return;
	}

	return 0;

err_return:
	if (i > 0)
		bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
	return -1;
}
1391/* Free Tx object Resources */
1392static void
1393bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1394{
1395 int i;
1396
1397 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1398 if (res_info[i].res_type == BNA_RES_T_MEM)
1399 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1400 else if (res_info[i].res_type == BNA_RES_T_INTR)
1401 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1402 }
1403}
1404
1405/* Allocates memory and interrupt resources for Tx object */
1406static int
1407bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
078086f3 1408 u32 tx_id)
8b230ed8
RM
1409{
1410 int i, err = 0;
1411
1412 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1413 if (res_info[i].res_type == BNA_RES_T_MEM)
1414 err = bnad_mem_alloc(bnad,
1415 &res_info[i].res_u.mem_info);
1416 else if (res_info[i].res_type == BNA_RES_T_INTR)
1417 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1418 &res_info[i].res_u.intr_info);
1419 if (err)
1420 goto err_return;
1421 }
1422 return 0;
1423
1424err_return:
1425 bnad_tx_res_free(bnad, res_info);
1426 return err;
1427}
1428
1429/* Free Rx object Resources */
1430static void
1431bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1432{
1433 int i;
1434
1435 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1436 if (res_info[i].res_type == BNA_RES_T_MEM)
1437 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1438 else if (res_info[i].res_type == BNA_RES_T_INTR)
1439 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1440 }
1441}
1442
1443/* Allocates memory and interrupt resources for Rx object */
1444static int
1445bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1446 uint rx_id)
1447{
1448 int i, err = 0;
1449
1450 /* All memory needs to be allocated before setup_ccbs */
1451 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1452 if (res_info[i].res_type == BNA_RES_T_MEM)
1453 err = bnad_mem_alloc(bnad,
1454 &res_info[i].res_u.mem_info);
1455 else if (res_info[i].res_type == BNA_RES_T_INTR)
1456 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1457 &res_info[i].res_u.intr_info);
1458 if (err)
1459 goto err_return;
1460 }
1461 return 0;
1462
1463err_return:
1464 bnad_rx_res_free(bnad, res_info);
1465 return err;
1466}
1467
1468/* Timer callbacks */
1469/* a) IOC timer */
1470static void
1471bnad_ioc_timeout(unsigned long data)
1472{
1473 struct bnad *bnad = (struct bnad *)data;
1474 unsigned long flags;
1475
1476 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3 1477 bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc);
8b230ed8
RM
1478 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1479}
1480
1481static void
1482bnad_ioc_hb_check(unsigned long data)
1483{
1484 struct bnad *bnad = (struct bnad *)data;
1485 unsigned long flags;
1486
1487 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3 1488 bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc);
8b230ed8
RM
1489 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1490}
1491
1492static void
1d32f769 1493bnad_iocpf_timeout(unsigned long data)
8b230ed8
RM
1494{
1495 struct bnad *bnad = (struct bnad *)data;
1496 unsigned long flags;
1497
1498 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3 1499 bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc);
1d32f769
RM
1500 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1501}
1502
1503static void
1504bnad_iocpf_sem_timeout(unsigned long data)
1505{
1506 struct bnad *bnad = (struct bnad *)data;
1507 unsigned long flags;
1508
1509 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3 1510 bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc);
8b230ed8
RM
1511 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1512}
1513
/*
 * All timer routines use bnad->bna_lock to protect against
 * the following race, which may occur in case of no locking:
 *	Time	CPU m	CPU n
 *	0	1 = test_bit
 *	1			clear_bit
 *	2			del_timer_sync
 *	3	mod_timer
 */

/* b) Dynamic Interrupt Moderation Timer */
static void
bnad_dim_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	int i, j;
	unsigned long flags;

	if (!netif_carrier_ok(bnad->netdev))
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			if (!rx_ctrl->ccb)
				continue;
			bna_rx_dim_update(rx_ctrl->ccb);
		}
	}

	/* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
	if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/* c) Statistics Timer */
static void
bnad_stats_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	if (!netif_running(bnad->netdev) ||
		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_hw_stats_get(&bnad->bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * Set up timer for DIM
 * Called with bnad->bna_lock held
 */
void
bnad_dim_timer_start(struct bnad *bnad)
{
	if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
	    !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
		setup_timer(&bnad->dim_timer, bnad_dim_timeout,
			    (unsigned long)bnad);
		set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
	}
}

/*
 * Set up timer for statistics
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
static void
bnad_stats_timer_start(struct bnad *bnad)
{
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
		setup_timer(&bnad->stats_timer, bnad_stats_timeout,
			    (unsigned long)bnad);
		mod_timer(&bnad->stats_timer,
			  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * Stops the stats timer
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
static void
bnad_stats_timer_stop(struct bnad *bnad)
{
	int to_del = 0;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		to_del = 1;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (to_del)
		del_timer_sync(&bnad->stats_timer);
}

/* Utilities */

static void
bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
{
	int i = 1; /* Index 0 has broadcast address */
	struct netdev_hw_addr *mc_addr;

	netdev_for_each_mc_addr(mc_addr, netdev) {
		memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
							ETH_ALEN);
		i++;
	}
}

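/*
 * NAPI poll handler: process up to 'budget' Rx completions; when fewer
 * are received, complete NAPI and re-enable the Rx interrupt.
 */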
static int
bnad_napi_poll_rx(struct napi_struct *napi, int budget)
{
	struct bnad_rx_ctrl *rx_ctrl =
		container_of(napi, struct bnad_rx_ctrl, napi);
	struct bnad *bnad = rx_ctrl->bnad;
	int rcvd = 0;

	rx_ctrl->rx_poll_ctr++;

	if (!netif_carrier_ok(bnad->netdev))
		goto poll_exit;

	rcvd = bnad_poll_cq(bnad, rx_ctrl->ccb, budget);
	if (rcvd >= budget)
		return rcvd;

poll_exit:
	napi_complete(napi);

	rx_ctrl->rx_complete++;

	if (rx_ctrl->ccb)
		bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);

	return rcvd;
}

#define BNAD_NAPI_POLL_QUOTA		64
static void
bnad_napi_init(struct bnad *bnad, u32 rx_id)
{
	struct bnad_rx_ctrl *rx_ctrl;
	int i;

	/* Initialize & enable NAPI */
	for (i = 0; i < bnad->num_rxp_per_rx; i++) {
		rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
		netif_napi_add(bnad->netdev, &rx_ctrl->napi,
			       bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
	}
}

static void
bnad_napi_enable(struct bnad *bnad, u32 rx_id)
{
	struct bnad_rx_ctrl *rx_ctrl;
	int i;

	/* Initialize & enable NAPI */
	for (i = 0; i < bnad->num_rxp_per_rx; i++) {
		rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];

		napi_enable(&rx_ctrl->napi);
	}
}

static void
bnad_napi_disable(struct bnad *bnad, u32 rx_id)
{
	int i;

	/* First disable and then clean up */
	for (i = 0; i < bnad->num_rxp_per_rx; i++) {
		napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
		netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
	}
}

/* Should be called with conf_lock held */
void
bnad_cleanup_tx(struct bnad *bnad, u32 tx_id)
{
	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
	unsigned long flags;

	if (!tx_info->tx)
		return;

	init_completion(&bnad->bnad_completions.tx_comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	wait_for_completion(&bnad->bnad_completions.tx_comp);

	if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
		bnad_tx_msix_unregister(bnad, tx_info,
			bnad->num_txq_per_tx);

	if (0 == tx_id)
		tasklet_kill(&bnad->tx_free_tasklet);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_destroy(tx_info->tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	tx_info->tx = NULL;
	tx_info->tx_id = 0;

	bnad_tx_res_free(bnad, res_info);
}

/* Should be called with conf_lock held */
int
bnad_setup_tx(struct bnad *bnad, u32 tx_id)
{
	int err;
	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
	struct bna_intr_info *intr_info =
			&res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
	struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
	static const struct bna_tx_event_cbfn tx_cbfn = {
		.tcb_setup_cbfn = bnad_cb_tcb_setup,
		.tcb_destroy_cbfn = bnad_cb_tcb_destroy,
		.tx_stall_cbfn = bnad_cb_tx_stall,
		.tx_resume_cbfn = bnad_cb_tx_resume,
		.tx_cleanup_cbfn = bnad_cb_tx_cleanup,
	};

	struct bna_tx *tx;
	unsigned long flags;

	tx_info->tx_id = tx_id;

	/* Initialize the Tx object configuration */
	tx_config->num_txq = bnad->num_txq_per_tx;
	tx_config->txq_depth = bnad->txq_depth;
	tx_config->tx_type = BNA_TX_T_REGULAR;
	tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;

	/* Get BNA's resource requirement for one tx object */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_res_req(bnad->num_txq_per_tx,
		bnad->txq_depth, res_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Fill Unmap Q memory requirements */
	BNAD_FILL_UNMAPQ_MEM_REQ(
			&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
			bnad->num_txq_per_tx,
			BNAD_TX_UNMAPQ_DEPTH);

	/* Allocate resources */
	err = bnad_tx_res_alloc(bnad, res_info, tx_id);
	if (err)
		return err;

	/* Ask BNA to create one Tx object, supplying required resources */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
			tx_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (!tx)
		goto err_return;
	tx_info->tx = tx;

	/* Register ISR for the Tx object */
	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
		err = bnad_tx_msix_register(bnad, tx_info,
			tx_id, bnad->num_txq_per_tx);
		if (err)
			goto err_return;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_enable(tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return 0;

err_return:
	bnad_tx_res_free(bnad, res_info);
	return err;
}

/* Setup the rx config for bna_rx_create */
/* bnad decides the configuration */
static void
bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
{
	rx_config->rx_type = BNA_RX_T_REGULAR;
	rx_config->num_paths = bnad->num_rxp_per_rx;
	rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;

	if (bnad->num_rxp_per_rx > 1) {
		rx_config->rss_status = BNA_STATUS_T_ENABLED;
		rx_config->rss_config.hash_type =
				(BFI_ENET_RSS_IPV6 |
				 BFI_ENET_RSS_IPV6_TCP |
				 BFI_ENET_RSS_IPV4 |
				 BFI_ENET_RSS_IPV4_TCP);
		rx_config->rss_config.hash_mask =
				bnad->num_rxp_per_rx - 1;
		get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
			sizeof(rx_config->rss_config.toeplitz_hash_key));
	} else {
		rx_config->rss_status = BNA_STATUS_T_DISABLED;
		memset(&rx_config->rss_config, 0,
		       sizeof(rx_config->rss_config));
	}
	rx_config->rxp_type = BNA_RXP_SLR;
	rx_config->q_depth = bnad->rxq_depth;

	rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;

	rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
}

static void
bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
{
	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
	int i;

	for (i = 0; i < bnad->num_rxp_per_rx; i++)
		rx_info->rx_ctrl[i].bnad = bnad;
}

8b230ed8
RM
1862/* Called with mutex_lock(&bnad->conf_mutex) held */
1863void
078086f3 1864bnad_cleanup_rx(struct bnad *bnad, u32 rx_id)
8b230ed8
RM
1865{
1866 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1867 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1868 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1869 unsigned long flags;
271e8b79 1870 int to_del = 0;
8b230ed8
RM
1871
1872 if (!rx_info->rx)
1873 return;
1874
1875 if (0 == rx_id) {
1876 spin_lock_irqsave(&bnad->bna_lock, flags);
271e8b79
RM
1877 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1878 test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
8b230ed8 1879 clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
271e8b79
RM
1880 to_del = 1;
1881 }
8b230ed8 1882 spin_unlock_irqrestore(&bnad->bna_lock, flags);
271e8b79 1883 if (to_del)
8b230ed8
RM
1884 del_timer_sync(&bnad->dim_timer);
1885 }
1886
8b230ed8
RM
1887 init_completion(&bnad->bnad_completions.rx_comp);
1888 spin_lock_irqsave(&bnad->bna_lock, flags);
1889 bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
1890 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1891 wait_for_completion(&bnad->bnad_completions.rx_comp);
1892
1893 if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
1894 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
1895
2be67144
RM
1896 bnad_napi_disable(bnad, rx_id);
1897
8b230ed8
RM
1898 spin_lock_irqsave(&bnad->bna_lock, flags);
1899 bna_rx_destroy(rx_info->rx);
8b230ed8
RM
1900
1901 rx_info->rx = NULL;
3caa1e95 1902 rx_info->rx_id = 0;
b9fa1fbf 1903 spin_unlock_irqrestore(&bnad->bna_lock, flags);
8b230ed8
RM
1904
1905 bnad_rx_res_free(bnad, res_info);
1906}
1907
1908/* Called with mutex_lock(&bnad->conf_mutex) held */
1909int
078086f3 1910bnad_setup_rx(struct bnad *bnad, u32 rx_id)
8b230ed8
RM
1911{
1912 int err;
1913 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1914 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1915 struct bna_intr_info *intr_info =
1916 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
1917 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
d91d25d5 1918 static const struct bna_rx_event_cbfn rx_cbfn = {
1919 .rcb_setup_cbfn = bnad_cb_rcb_setup,
1920 .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
1921 .ccb_setup_cbfn = bnad_cb_ccb_setup,
1922 .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
5bcf6ac0 1923 .rx_stall_cbfn = bnad_cb_rx_stall,
d91d25d5 1924 .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
1925 .rx_post_cbfn = bnad_cb_rx_post,
1926 };
8b230ed8
RM
1927 struct bna_rx *rx;
1928 unsigned long flags;
1929
078086f3
RM
1930 rx_info->rx_id = rx_id;
1931
8b230ed8
RM
1932 /* Initialize the Rx object configuration */
1933 bnad_init_rx_config(bnad, rx_config);
1934
8b230ed8
RM
1935 /* Get BNA's resource requirement for one Rx object */
1936 spin_lock_irqsave(&bnad->bna_lock, flags);
1937 bna_rx_res_req(rx_config, res_info);
1938 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1939
1940 /* Fill Unmap Q memory requirements */
1941 BNAD_FILL_UNMAPQ_MEM_REQ(
1942 &res_info[BNA_RX_RES_MEM_T_UNMAPQ],
1943 rx_config->num_paths +
1944 ((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 :
1945 rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH);
1946
1947 /* Allocate resource */
1948 err = bnad_rx_res_alloc(bnad, res_info, rx_id);
1949 if (err)
1950 return err;
1951
2be67144
RM
1952 bnad_rx_ctrl_init(bnad, rx_id);
1953
8b230ed8
RM
1954 /* Ask BNA to create one Rx object, supplying required resources */
1955 spin_lock_irqsave(&bnad->bna_lock, flags);
1956 rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
1957 rx_info);
3caa1e95
RM
1958 if (!rx) {
1959 err = -ENOMEM;
b9fa1fbf 1960 spin_unlock_irqrestore(&bnad->bna_lock, flags);
8b230ed8 1961 goto err_return;
3caa1e95 1962 }
8b230ed8 1963 rx_info->rx = rx;
b9fa1fbf 1964 spin_unlock_irqrestore(&bnad->bna_lock, flags);
8b230ed8 1965
2be67144
RM
1966 /*
 1967 * Init NAPI, so that state is set to NAPI_STATE_SCHED and the
 1968 * IRQ handler cannot schedule NAPI at this point.
1969 */
1970 bnad_napi_init(bnad, rx_id);
1971
8b230ed8
RM
1972 /* Register ISR for the Rx object */
1973 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1974 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
1975 rx_config->num_paths);
1976 if (err)
1977 goto err_return;
1978 }
1979
8b230ed8
RM
1980 spin_lock_irqsave(&bnad->bna_lock, flags);
1981 if (0 == rx_id) {
1982 /* Set up Dynamic Interrupt Moderation Vector */
1983 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
1984 bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
1985
1986 /* Enable VLAN filtering only on the default Rx */
1987 bna_rx_vlanfilter_enable(rx);
1988
1989 /* Start the DIM timer */
1990 bnad_dim_timer_start(bnad);
1991 }
1992
1993 bna_rx_enable(rx);
1994 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1995
2be67144
RM
1996 /* Enable scheduling of NAPI */
1997 bnad_napi_enable(bnad, rx_id);
1998
8b230ed8
RM
1999 return 0;
2000
2001err_return:
2002 bnad_cleanup_rx(bnad, rx_id);
2003 return err;
2004}
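/*
 * Note on ordering above: NAPI is initialized before the Rx MSI-X
 * handlers are registered, so an early interrupt cannot schedule a NAPI
 * context that does not exist yet; NAPI scheduling is enabled only
 * after the Rx object itself has been enabled.
 */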
2005
2006/* Called with conf_lock & bnad->bna_lock held */
2007void
2008bnad_tx_coalescing_timeo_set(struct bnad *bnad)
2009{
2010 struct bnad_tx_info *tx_info;
2011
2012 tx_info = &bnad->tx_info[0];
2013 if (!tx_info->tx)
2014 return;
2015
2016 bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
2017}
2018
2019/* Called with conf_lock & bnad->bna_lock held */
2020void
2021bnad_rx_coalescing_timeo_set(struct bnad *bnad)
2022{
2023 struct bnad_rx_info *rx_info;
0120b99c 2024 int i;
8b230ed8
RM
2025
2026 for (i = 0; i < bnad->num_rx; i++) {
2027 rx_info = &bnad->rx_info[i];
2028 if (!rx_info->rx)
2029 continue;
2030 bna_rx_coalescing_timeo_set(rx_info->rx,
2031 bnad->rx_coalescing_timeo);
2032 }
2033}
2034
2035/*
2036 * Called with bnad->bna_lock held
2037 */
a2122d95 2038int
8b230ed8
RM
2039bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
2040{
2041 int ret;
2042
2043 if (!is_valid_ether_addr(mac_addr))
2044 return -EADDRNOTAVAIL;
2045
2046 /* If datapath is down, pretend everything went through */
2047 if (!bnad->rx_info[0].rx)
2048 return 0;
2049
2050 ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
2051 if (ret != BNA_CB_SUCCESS)
2052 return -EADDRNOTAVAIL;
2053
2054 return 0;
2055}
2056
2057/* Should be called with conf_lock held */
a2122d95 2058int
8b230ed8
RM
2059bnad_enable_default_bcast(struct bnad *bnad)
2060{
2061 struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2062 int ret;
2063 unsigned long flags;
2064
2065 init_completion(&bnad->bnad_completions.mcast_comp);
2066
2067 spin_lock_irqsave(&bnad->bna_lock, flags);
2068 ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
2069 bnad_cb_rx_mcast_add);
2070 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2071
2072 if (ret == BNA_CB_SUCCESS)
2073 wait_for_completion(&bnad->bnad_completions.mcast_comp);
2074 else
2075 return -ENODEV;
2076
2077 if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2078 return -ENODEV;
2079
2080 return 0;
2081}
2082
19dbff9f 2083/* Called with mutex_lock(&bnad->conf_mutex) held */
a2122d95 2084void
aad75b66
RM
2085bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2086{
f859d7cb 2087 u16 vid;
aad75b66
RM
2088 unsigned long flags;
2089
f859d7cb 2090 for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
aad75b66 2091 spin_lock_irqsave(&bnad->bna_lock, flags);
f859d7cb 2092 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
aad75b66
RM
2093 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2094 }
2095}
2096
8b230ed8
RM
2097/* Statistics utilities */
2098void
250e061e 2099bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
8b230ed8 2100{
8b230ed8
RM
2101 int i, j;
2102
2103 for (i = 0; i < bnad->num_rx; i++) {
2104 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2105 if (bnad->rx_info[i].rx_ctrl[j].ccb) {
250e061e 2106 stats->rx_packets += bnad->rx_info[i].
8b230ed8 2107 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
250e061e 2108 stats->rx_bytes += bnad->rx_info[i].
8b230ed8
RM
2109 rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2110 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2111 bnad->rx_info[i].rx_ctrl[j].ccb->
2112 rcb[1]->rxq) {
250e061e 2113 stats->rx_packets +=
8b230ed8
RM
2114 bnad->rx_info[i].rx_ctrl[j].
2115 ccb->rcb[1]->rxq->rx_packets;
250e061e 2116 stats->rx_bytes +=
8b230ed8
RM
2117 bnad->rx_info[i].rx_ctrl[j].
2118 ccb->rcb[1]->rxq->rx_bytes;
2119 }
2120 }
2121 }
2122 }
2123 for (i = 0; i < bnad->num_tx; i++) {
2124 for (j = 0; j < bnad->num_txq_per_tx; j++) {
2125 if (bnad->tx_info[i].tcb[j]) {
250e061e 2126 stats->tx_packets +=
8b230ed8 2127 bnad->tx_info[i].tcb[j]->txq->tx_packets;
250e061e 2128 stats->tx_bytes +=
8b230ed8
RM
2129 bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2130 }
2131 }
2132 }
2133}
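/*
 * rcb[0] always exists; rcb[1] is the second RxQ of a completion queue,
 * present when two RxQs share one CQ (see bnad_rxqs_per_cq), which is
 * why it is NULL-checked before its counters are added.
 */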
2134
2135/*
2136 * Must be called with the bna_lock held.
2137 */
2138void
250e061e 2139bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
8b230ed8 2140{
078086f3
RM
2141 struct bfi_enet_stats_mac *mac_stats;
2142 u32 bmap;
8b230ed8
RM
2143 int i;
2144
078086f3 2145 mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
250e061e 2146 stats->rx_errors =
8b230ed8
RM
2147 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2148 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2149 mac_stats->rx_undersize;
250e061e 2150 stats->tx_errors = mac_stats->tx_fcs_error +
8b230ed8 2151 mac_stats->tx_undersize;
250e061e
ED
2152 stats->rx_dropped = mac_stats->rx_drop;
2153 stats->tx_dropped = mac_stats->tx_drop;
2154 stats->multicast = mac_stats->rx_multicast;
2155 stats->collisions = mac_stats->tx_total_collision;
8b230ed8 2156
250e061e 2157 stats->rx_length_errors = mac_stats->rx_frame_length_error;
8b230ed8
RM
2158
2159 /* receive ring buffer overflow ?? */
2160
250e061e
ED
2161 stats->rx_crc_errors = mac_stats->rx_fcs_error;
2162 stats->rx_frame_errors = mac_stats->rx_alignment_error;
8b230ed8 2163 /* recv'r fifo overrun */
078086f3
RM
2164 bmap = bna_rx_rid_mask(&bnad->bna);
2165 for (i = 0; bmap; i++) {
8b230ed8 2166 if (bmap & 1) {
250e061e 2167 stats->rx_fifo_errors +=
8b230ed8 2168 bnad->stats.bna_stats->
078086f3 2169 hw_stats.rxf_stats[i].frame_drops;
8b230ed8
RM
2170 break;
2171 }
2172 bmap >>= 1;
2173 }
2174}
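/*
 * The bitmap walk above adds the frame drops of the first active Rx
 * function only: e.g. for bmap == 0x4 the loop shifts twice, finds bit
 * 0 set at i == 2, adds rxf_stats[2].frame_drops and breaks out.
 */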
2175
2176static void
2177bnad_mbox_irq_sync(struct bnad *bnad)
2178{
2179 u32 irq;
2180 unsigned long flags;
2181
2182 spin_lock_irqsave(&bnad->bna_lock, flags);
2183 if (bnad->cfg_flags & BNAD_CF_MSIX)
8811e267 2184 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
8b230ed8
RM
2185 else
2186 irq = bnad->pcidev->irq;
2187 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2188
2189 synchronize_irq(irq);
2190}
2191
2192/* Utility used by bnad_start_xmit, for doing TSO */
2193static int
2194bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2195{
2196 int err;
2197
8b230ed8
RM
2198 if (skb_header_cloned(skb)) {
2199 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2200 if (err) {
2201 BNAD_UPDATE_CTR(bnad, tso_err);
2202 return err;
2203 }
2204 }
2205
2206 /*
2207 * For TSO, the TCP checksum field is seeded with pseudo-header sum
2208 * excluding the length field.
2209 */
2210 if (skb->protocol == htons(ETH_P_IP)) {
2211 struct iphdr *iph = ip_hdr(skb);
2212
2213 /* Do we really need these? */
2214 iph->tot_len = 0;
2215 iph->check = 0;
2216
2217 tcp_hdr(skb)->check =
2218 ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2219 IPPROTO_TCP, 0);
2220 BNAD_UPDATE_CTR(bnad, tso4);
2221 } else {
2222 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2223
8b230ed8
RM
2224 ipv6h->payload_len = 0;
2225 tcp_hdr(skb)->check =
2226 ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2227 IPPROTO_TCP, 0);
2228 BNAD_UPDATE_CTR(bnad, tso6);
2229 }
2230
2231 return 0;
2232}
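/*
 * The length fields are cleared and the length is left out of the seed
 * because, as is usual for TSO-capable hardware, the NIC rewrites the
 * per-segment lengths and folds each segment's length into the checksum
 * it computes.
 */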
2233
2234/*
2235 * Initialize Q numbers depending on Rx Paths
2236 * Called with bnad->bna_lock held, because of cfg_flags
2237 * access.
2238 */
2239static void
2240bnad_q_num_init(struct bnad *bnad)
2241{
2242 int rxps;
2243
2244 rxps = min((uint)num_online_cpus(),
772b5235 2245 (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
8b230ed8
RM
2246
2247 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2248 rxps = 1; /* INTx */
2249
2250 bnad->num_rx = 1;
2251 bnad->num_tx = 1;
2252 bnad->num_rxp_per_rx = rxps;
2253 bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2254}
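/*
 * For example, on an 8-CPU system with MSI-X available this gives one
 * Tx object, one Rx object and up to 8 Rx paths (subject to the
 * BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX cap); without MSI-X it collapses to
 * a single Rx path.
 */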
2255
2256/*
2257 * Adjusts the queue numbers, given the number of MSI-X vectors.
2258 * Preference is given to RSS over Tx priority queues; in that
2259 * case just 1 Tx queue is used.
2260 * Called with bnad->bna_lock held because of cfg_flags access
2261 */
2262static void
078086f3 2263bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
8b230ed8
RM
2264{
2265 bnad->num_txq_per_tx = 1;
2266 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
2267 bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2268 (bnad->cfg_flags & BNAD_CF_MSIX)) {
2269 bnad->num_rxp_per_rx = msix_vectors -
2270 (bnad->num_tx * bnad->num_txq_per_tx) -
2271 BNAD_MAILBOX_MSIX_VECTORS;
2272 } else
2273 bnad->num_rxp_per_rx = 1;
2274}
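/*
 * For example, with 8 granted vectors, a single TxQ and one mailbox
 * vector (assuming BNAD_MAILBOX_MSIX_VECTORS is 1), the Rx side gets
 * 8 - 1 - 1 = 6 paths; if too few vectors remain it falls back to a
 * single Rx path.
 */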
2275
078086f3
RM
2276/* Enable / disable ioceth */
2277static int
2278bnad_ioceth_disable(struct bnad *bnad)
8b230ed8
RM
2279{
2280 unsigned long flags;
078086f3 2281 int err = 0;
8b230ed8
RM
2282
2283 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3
RM
2284 init_completion(&bnad->bnad_completions.ioc_comp);
2285 bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
8b230ed8
RM
2286 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2287
078086f3
RM
2288 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2289 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2290
2291 err = bnad->bnad_completions.ioc_comp_status;
2292 return err;
8b230ed8
RM
2293}
2294
2295static int
078086f3 2296bnad_ioceth_enable(struct bnad *bnad)
8b230ed8
RM
2297{
2298 int err = 0;
2299 unsigned long flags;
2300
8b230ed8 2301 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3
RM
2302 init_completion(&bnad->bnad_completions.ioc_comp);
2303 bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2304 bna_ioceth_enable(&bnad->bna.ioceth);
8b230ed8
RM
2305 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2306
078086f3
RM
2307 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2308 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
8b230ed8 2309
078086f3 2310 err = bnad->bnad_completions.ioc_comp_status;
8b230ed8
RM
2311
2312 return err;
2313}
2314
2315/* Free BNA resources */
2316static void
078086f3
RM
2317bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2318 u32 res_val_max)
8b230ed8
RM
2319{
2320 int i;
8b230ed8 2321
078086f3
RM
2322 for (i = 0; i < res_val_max; i++)
2323 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
8b230ed8
RM
2324}
2325
2326/* Allocates memory and interrupt resources for BNA */
2327static int
078086f3
RM
2328bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2329 u32 res_val_max)
8b230ed8
RM
2330{
2331 int i, err;
8b230ed8 2332
078086f3
RM
2333 for (i = 0; i < res_val_max; i++) {
2334 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
8b230ed8
RM
2335 if (err)
2336 goto err_return;
2337 }
2338 return 0;
2339
2340err_return:
078086f3 2341 bnad_res_free(bnad, res_info, res_val_max);
8b230ed8
RM
2342 return err;
2343}
2344
2345/* Interrupt enable / disable */
2346static void
2347bnad_enable_msix(struct bnad *bnad)
2348{
2349 int i, ret;
8b230ed8
RM
2350 unsigned long flags;
2351
2352 spin_lock_irqsave(&bnad->bna_lock, flags);
2353 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2354 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2355 return;
2356 }
2357 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2358
2359 if (bnad->msix_table)
2360 return;
2361
8b230ed8 2362 bnad->msix_table =
b7ee31c5 2363 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
8b230ed8
RM
2364
2365 if (!bnad->msix_table)
2366 goto intx_mode;
2367
b7ee31c5 2368 for (i = 0; i < bnad->msix_num; i++)
8b230ed8
RM
2369 bnad->msix_table[i].entry = i;
2370
b7ee31c5 2371 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
8b230ed8
RM
2372 if (ret > 0) {
2373 /* Not enough MSI-X vectors. */
19dbff9f
RM
2374 pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
2375 ret, bnad->msix_num);
8b230ed8
RM
2376
2377 spin_lock_irqsave(&bnad->bna_lock, flags);
 2378 /* ret = number of vectors that we got */
271e8b79
RM
2379 bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2380 (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
8b230ed8
RM
2381 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2382
271e8b79 2383 bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
8b230ed8 2384 BNAD_MAILBOX_MSIX_VECTORS;
8b230ed8 2385
078086f3
RM
2386 if (bnad->msix_num > ret)
2387 goto intx_mode;
2388
8b230ed8
RM
2389 /* Try once more with adjusted numbers */
2390 /* If this fails, fall back to INTx */
2391 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
b7ee31c5 2392 bnad->msix_num);
8b230ed8
RM
2393 if (ret)
2394 goto intx_mode;
2395
2396 } else if (ret < 0)
2397 goto intx_mode;
078086f3
RM
2398
2399 pci_intx(bnad->pcidev, 0);
2400
8b230ed8
RM
2401 return;
2402
2403intx_mode:
19dbff9f 2404 pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n");
8b230ed8
RM
2405
2406 kfree(bnad->msix_table);
2407 bnad->msix_table = NULL;
2408 bnad->msix_num = 0;
8b230ed8
RM
2409 spin_lock_irqsave(&bnad->bna_lock, flags);
2410 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2411 bnad_q_num_init(bnad);
2412 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2413}
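/*
 * MSI-X strategy above: request msix_num vectors; on a partial grant,
 * shrink the queue configuration to what was granted and retry once;
 * if that still fails (or the table allocation fails), fall back to
 * INTx and re-derive the queue counts for single-vector operation.
 */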
2414
2415static void
2416bnad_disable_msix(struct bnad *bnad)
2417{
2418 u32 cfg_flags;
2419 unsigned long flags;
2420
2421 spin_lock_irqsave(&bnad->bna_lock, flags);
2422 cfg_flags = bnad->cfg_flags;
2423 if (bnad->cfg_flags & BNAD_CF_MSIX)
2424 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2425 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2426
2427 if (cfg_flags & BNAD_CF_MSIX) {
2428 pci_disable_msix(bnad->pcidev);
2429 kfree(bnad->msix_table);
2430 bnad->msix_table = NULL;
2431 }
2432}
2433
2434/* Netdev entry points */
2435static int
2436bnad_open(struct net_device *netdev)
2437{
2438 int err;
2439 struct bnad *bnad = netdev_priv(netdev);
2440 struct bna_pause_config pause_config;
2441 int mtu;
2442 unsigned long flags;
2443
2444 mutex_lock(&bnad->conf_mutex);
2445
2446 /* Tx */
2447 err = bnad_setup_tx(bnad, 0);
2448 if (err)
2449 goto err_return;
2450
2451 /* Rx */
2452 err = bnad_setup_rx(bnad, 0);
2453 if (err)
2454 goto cleanup_tx;
2455
2456 /* Port */
2457 pause_config.tx_pause = 0;
2458 pause_config.rx_pause = 0;
2459
078086f3 2460 mtu = ETH_HLEN + VLAN_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
8b230ed8
RM
2461
2462 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3
RM
2463 bna_enet_mtu_set(&bnad->bna.enet, mtu, NULL);
2464 bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
2465 bna_enet_enable(&bnad->bna.enet);
8b230ed8
RM
2466 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2467
2468 /* Enable broadcast */
2469 bnad_enable_default_bcast(bnad);
2470
aad75b66
RM
2471 /* Restore VLANs, if any */
2472 bnad_restore_vlans(bnad, 0);
2473
8b230ed8
RM
2474 /* Set the UCAST address */
2475 spin_lock_irqsave(&bnad->bna_lock, flags);
2476 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2477 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2478
2479 /* Start the stats timer */
2480 bnad_stats_timer_start(bnad);
2481
2482 mutex_unlock(&bnad->conf_mutex);
2483
2484 return 0;
2485
2486cleanup_tx:
2487 bnad_cleanup_tx(bnad, 0);
2488
2489err_return:
2490 mutex_unlock(&bnad->conf_mutex);
2491 return err;
2492}
2493
2494static int
2495bnad_stop(struct net_device *netdev)
2496{
2497 struct bnad *bnad = netdev_priv(netdev);
2498 unsigned long flags;
2499
2500 mutex_lock(&bnad->conf_mutex);
2501
2502 /* Stop the stats timer */
2503 bnad_stats_timer_stop(bnad);
2504
078086f3 2505 init_completion(&bnad->bnad_completions.enet_comp);
8b230ed8
RM
2506
2507 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3
RM
2508 bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2509 bnad_cb_enet_disabled);
8b230ed8
RM
2510 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2511
078086f3 2512 wait_for_completion(&bnad->bnad_completions.enet_comp);
8b230ed8
RM
2513
2514 bnad_cleanup_tx(bnad, 0);
2515 bnad_cleanup_rx(bnad, 0);
2516
2517 /* Synchronize mailbox IRQ */
2518 bnad_mbox_irq_sync(bnad);
2519
2520 mutex_unlock(&bnad->conf_mutex);
2521
2522 return 0;
2523}
2524
2525/* TX */
2526/*
2527 * bnad_start_xmit : Netdev entry point for Transmit
2528 * Called under lock held by net_device
2529 */
2530static netdev_tx_t
2531bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2532{
2533 struct bnad *bnad = netdev_priv(netdev);
078086f3
RM
2534 u32 txq_id = 0;
2535 struct bna_tcb *tcb = bnad->tx_info[0].tcb[txq_id];
8b230ed8 2536
0120b99c
RM
2537 u16 txq_prod, vlan_tag = 0;
2538 u32 unmap_prod, wis, wis_used, wi_range;
2539 u32 vectors, vect_id, i, acked;
0120b99c 2540 int err;
271e8b79
RM
2541 unsigned int len;
2542 u32 gso_size;
8b230ed8 2543
078086f3 2544 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
0120b99c 2545 dma_addr_t dma_addr;
8b230ed8 2546 struct bna_txq_entry *txqent;
078086f3 2547 u16 flags;
8b230ed8 2548
271e8b79
RM
2549 if (unlikely(skb->len <= ETH_HLEN)) {
2550 dev_kfree_skb(skb);
2551 BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2552 return NETDEV_TX_OK;
2553 }
2554 if (unlikely(skb_headlen(skb) > BFI_TX_MAX_DATA_PER_VECTOR)) {
8b230ed8 2555 dev_kfree_skb(skb);
271e8b79
RM
2556 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_too_long);
2557 return NETDEV_TX_OK;
2558 }
2559 if (unlikely(skb_headlen(skb) == 0)) {
2560 dev_kfree_skb(skb);
2561 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
8b230ed8
RM
2562 return NETDEV_TX_OK;
2563 }
2564
2565 /*
2566 * Takes care of the Tx that is scheduled between clearing the flag
19dbff9f 2567 * and the netif_tx_stop_all_queues() call.
8b230ed8 2568 */
be7fa326 2569 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
8b230ed8 2570 dev_kfree_skb(skb);
271e8b79 2571 BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
8b230ed8
RM
2572 return NETDEV_TX_OK;
2573 }
2574
8b230ed8 2575 vectors = 1 + skb_shinfo(skb)->nr_frags;
271e8b79 2576 if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
8b230ed8 2577 dev_kfree_skb(skb);
271e8b79 2578 BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
8b230ed8
RM
2579 return NETDEV_TX_OK;
2580 }
2581 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
2582 acked = 0;
078086f3
RM
2583 if (unlikely(wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2584 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
8b230ed8
RM
2585 if ((u16) (*tcb->hw_consumer_index) !=
2586 tcb->consumer_index &&
2587 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2588 acked = bnad_free_txbufs(bnad, tcb);
be7fa326
RM
2589 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2590 bna_ib_ack(tcb->i_dbell, acked);
8b230ed8
RM
2591 smp_mb__before_clear_bit();
2592 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2593 } else {
2594 netif_stop_queue(netdev);
2595 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2596 }
2597
2598 smp_mb();
2599 /*
2600 * Check again to deal with race condition between
2601 * netif_stop_queue here, and netif_wake_queue in
2602 * interrupt handler which is not inside netif tx lock.
2603 */
2604 if (likely
2605 (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2606 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2607 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2608 return NETDEV_TX_BUSY;
2609 } else {
2610 netif_wake_queue(netdev);
2611 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2612 }
2613 }
2614
2615 unmap_prod = unmap_q->producer_index;
8b230ed8
RM
2616 flags = 0;
2617
2618 txq_prod = tcb->producer_index;
2619 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
8b230ed8
RM
2620 txqent->hdr.wi.reserved = 0;
2621 txqent->hdr.wi.num_vectors = vectors;
8b230ed8 2622
eab6d18d 2623 if (vlan_tx_tag_present(skb)) {
8b230ed8
RM
2624 vlan_tag = (u16) vlan_tx_tag_get(skb);
2625 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2626 }
2627 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2628 vlan_tag =
2629 (tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff);
2630 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2631 }
2632
2633 txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2634
2635 if (skb_is_gso(skb)) {
271e8b79
RM
2636 gso_size = skb_shinfo(skb)->gso_size;
2637
2638 if (unlikely(gso_size > netdev->mtu)) {
2639 dev_kfree_skb(skb);
2640 BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
2641 return NETDEV_TX_OK;
2642 }
2643 if (unlikely((gso_size + skb_transport_offset(skb) +
2644 tcp_hdrlen(skb)) >= skb->len)) {
2645 txqent->hdr.wi.opcode =
2646 __constant_htons(BNA_TXQ_WI_SEND);
2647 txqent->hdr.wi.lso_mss = 0;
2648 BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2649 } else {
2650 txqent->hdr.wi.opcode =
2651 __constant_htons(BNA_TXQ_WI_SEND_LSO);
2652 txqent->hdr.wi.lso_mss = htons(gso_size);
2653 }
2654
8b230ed8 2655 err = bnad_tso_prepare(bnad, skb);
271e8b79 2656 if (unlikely(err)) {
8b230ed8 2657 dev_kfree_skb(skb);
271e8b79 2658 BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
8b230ed8
RM
2659 return NETDEV_TX_OK;
2660 }
8b230ed8
RM
2661 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2662 txqent->hdr.wi.l4_hdr_size_n_offset =
2663 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2664 (tcp_hdrlen(skb) >> 2,
2665 skb_transport_offset(skb)));
271e8b79
RM
2666 } else {
2667 txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND);
8b230ed8
RM
2668 txqent->hdr.wi.lso_mss = 0;
2669
271e8b79
RM
2670 if (unlikely(skb->len > (netdev->mtu + ETH_HLEN))) {
2671 dev_kfree_skb(skb);
2672 BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
2673 return NETDEV_TX_OK;
8b230ed8 2674 }
8b230ed8 2675
271e8b79
RM
2676 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2677 u8 proto = 0;
8b230ed8 2678
271e8b79
RM
2679 if (skb->protocol == __constant_htons(ETH_P_IP))
2680 proto = ip_hdr(skb)->protocol;
2681 else if (skb->protocol ==
2682 __constant_htons(ETH_P_IPV6)) {
2683 /* nexthdr may not be TCP immediately. */
2684 proto = ipv6_hdr(skb)->nexthdr;
2685 }
2686 if (proto == IPPROTO_TCP) {
2687 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2688 txqent->hdr.wi.l4_hdr_size_n_offset =
2689 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2690 (0, skb_transport_offset(skb)));
2691
2692 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2693
2694 if (unlikely(skb_headlen(skb) <
2695 skb_transport_offset(skb) + tcp_hdrlen(skb))) {
2696 dev_kfree_skb(skb);
2697 BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
2698 return NETDEV_TX_OK;
2699 }
8b230ed8 2700
271e8b79
RM
2701 } else if (proto == IPPROTO_UDP) {
2702 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2703 txqent->hdr.wi.l4_hdr_size_n_offset =
2704 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2705 (0, skb_transport_offset(skb)));
2706
2707 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2708 if (unlikely(skb_headlen(skb) <
2709 skb_transport_offset(skb) +
2710 sizeof(struct udphdr))) {
2711 dev_kfree_skb(skb);
2712 BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
2713 return NETDEV_TX_OK;
2714 }
2715 } else {
8b230ed8 2716 dev_kfree_skb(skb);
271e8b79 2717 BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
8b230ed8
RM
2718 return NETDEV_TX_OK;
2719 }
271e8b79
RM
2720 } else {
2721 txqent->hdr.wi.l4_hdr_size_n_offset = 0;
8b230ed8 2722 }
8b230ed8
RM
2723 }
2724
2725 txqent->hdr.wi.flags = htons(flags);
2726
2727 txqent->hdr.wi.frame_length = htonl(skb->len);
2728
2729 unmap_q->unmap_array[unmap_prod].skb = skb;
271e8b79
RM
2730 len = skb_headlen(skb);
2731 txqent->vector[0].length = htons(len);
5ea74318
IV
2732 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
2733 skb_headlen(skb), DMA_TO_DEVICE);
2734 dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
8b230ed8
RM
2735 dma_addr);
2736
271e8b79 2737 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
8b230ed8
RM
2738 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2739
271e8b79
RM
2740 vect_id = 0;
2741 wis_used = 1;
2742
8b230ed8
RM
2743 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2744 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
078086f3 2745 u16 size = frag->size;
8b230ed8 2746
271e8b79
RM
2747 if (unlikely(size == 0)) {
2748 unmap_prod = unmap_q->producer_index;
2749
2750 unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
2751 unmap_q->unmap_array,
2752 unmap_prod, unmap_q->q_depth, skb,
2753 i);
2754 dev_kfree_skb(skb);
2755 BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
2756 return NETDEV_TX_OK;
2757 }
2758
2759 len += size;
2760
8b230ed8
RM
2761 if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
2762 vect_id = 0;
2763 if (--wi_range)
2764 txqent++;
2765 else {
2766 BNA_QE_INDX_ADD(txq_prod, wis_used,
2767 tcb->q_depth);
2768 wis_used = 0;
2769 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
2770 txqent, wi_range);
8b230ed8
RM
2771 }
2772 wis_used++;
271e8b79
RM
2773 txqent->hdr.wi_ext.opcode =
2774 __constant_htons(BNA_TXQ_WI_EXTENSION);
8b230ed8
RM
2775 }
2776
2777 BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
2778 txqent->vector[vect_id].length = htons(size);
4d5b1a67
IC
2779 dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
2780 0, size, DMA_TO_DEVICE);
5ea74318 2781 dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
8b230ed8
RM
2782 dma_addr);
2783 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2784 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2785 }
2786
271e8b79
RM
2787 if (unlikely(len != skb->len)) {
2788 unmap_prod = unmap_q->producer_index;
2789
2790 unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
2791 unmap_q->unmap_array, unmap_prod,
2792 unmap_q->q_depth, skb,
2793 skb_shinfo(skb)->nr_frags);
2794 dev_kfree_skb(skb);
2795 BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
2796 return NETDEV_TX_OK;
2797 }
2798
8b230ed8
RM
2799 unmap_q->producer_index = unmap_prod;
2800 BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
2801 tcb->producer_index = txq_prod;
2802
2803 smp_mb();
be7fa326
RM
2804
2805 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2806 return NETDEV_TX_OK;
2807
8b230ed8 2808 bna_txq_prod_indx_doorbell(tcb);
271e8b79 2809 smp_mb();
8b230ed8
RM
2810
2811 if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
2812 tasklet_schedule(&bnad->tx_free_tasklet);
2813
2814 return NETDEV_TX_OK;
2815}
2816
2817/*
2818 * Used spin_lock to synchronize reading of stats structures, which
2819 * is written by BNA under the same lock.
2820 */
250e061e
ED
2821static struct rtnl_link_stats64 *
2822bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
8b230ed8
RM
2823{
2824 struct bnad *bnad = netdev_priv(netdev);
2825 unsigned long flags;
2826
2827 spin_lock_irqsave(&bnad->bna_lock, flags);
2828
250e061e
ED
2829 bnad_netdev_qstats_fill(bnad, stats);
2830 bnad_netdev_hwstats_fill(bnad, stats);
8b230ed8
RM
2831
2832 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2833
250e061e 2834 return stats;
8b230ed8
RM
2835}
2836
a2122d95 2837void
8b230ed8
RM
2838bnad_set_rx_mode(struct net_device *netdev)
2839{
2840 struct bnad *bnad = netdev_priv(netdev);
2841 u32 new_mask, valid_mask;
2842 unsigned long flags;
2843
2844 spin_lock_irqsave(&bnad->bna_lock, flags);
2845
2846 new_mask = valid_mask = 0;
2847
2848 if (netdev->flags & IFF_PROMISC) {
2849 if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
2850 new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2851 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2852 bnad->cfg_flags |= BNAD_CF_PROMISC;
2853 }
2854 } else {
2855 if (bnad->cfg_flags & BNAD_CF_PROMISC) {
2856 new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
2857 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2858 bnad->cfg_flags &= ~BNAD_CF_PROMISC;
2859 }
2860 }
2861
2862 if (netdev->flags & IFF_ALLMULTI) {
2863 if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
2864 new_mask |= BNA_RXMODE_ALLMULTI;
2865 valid_mask |= BNA_RXMODE_ALLMULTI;
2866 bnad->cfg_flags |= BNAD_CF_ALLMULTI;
2867 }
2868 } else {
2869 if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
2870 new_mask &= ~BNA_RXMODE_ALLMULTI;
2871 valid_mask |= BNA_RXMODE_ALLMULTI;
2872 bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
2873 }
2874 }
2875
271e8b79
RM
2876 if (bnad->rx_info[0].rx == NULL)
2877 goto unlock;
2878
8b230ed8
RM
2879 bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
2880
2881 if (!netdev_mc_empty(netdev)) {
2882 u8 *mcaddr_list;
2883 int mc_count = netdev_mc_count(netdev);
2884
2885 /* Index 0 holds the broadcast address */
2886 mcaddr_list =
2887 kzalloc((mc_count + 1) * ETH_ALEN,
2888 GFP_ATOMIC);
2889 if (!mcaddr_list)
ca1cef3a 2890 goto unlock;
8b230ed8
RM
2891
2892 memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
2893
2894 /* Copy rest of the MC addresses */
2895 bnad_netdev_mc_list_get(netdev, mcaddr_list);
2896
2897 bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
2898 mcaddr_list, NULL);
2899
2900 /* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
2901 kfree(mcaddr_list);
2902 }
ca1cef3a 2903unlock:
8b230ed8
RM
2904 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2905}
2906
2907/*
2908 * bna_lock is used to sync writes to netdev->addr
2909 * conf_lock cannot be used since this call may be made
2910 * in a non-blocking context.
2911 */
2912static int
2913bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
2914{
2915 int err;
2916 struct bnad *bnad = netdev_priv(netdev);
2917 struct sockaddr *sa = (struct sockaddr *)mac_addr;
2918 unsigned long flags;
2919
2920 spin_lock_irqsave(&bnad->bna_lock, flags);
2921
2922 err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
2923
2924 if (!err)
2925 memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
2926
2927 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2928
2929 return err;
2930}
2931
2932static int
078086f3 2933bnad_mtu_set(struct bnad *bnad, int mtu)
8b230ed8 2934{
8b230ed8
RM
2935 unsigned long flags;
2936
078086f3
RM
2937 init_completion(&bnad->bnad_completions.mtu_comp);
2938
2939 spin_lock_irqsave(&bnad->bna_lock, flags);
2940 bna_enet_mtu_set(&bnad->bna.enet, mtu, bnad_cb_enet_mtu_set);
2941 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2942
2943 wait_for_completion(&bnad->bnad_completions.mtu_comp);
2944
2945 return bnad->bnad_completions.mtu_comp_status;
2946}
2947
2948static int
2949bnad_change_mtu(struct net_device *netdev, int new_mtu)
2950{
2951 int err, mtu = netdev->mtu;
8b230ed8
RM
2952 struct bnad *bnad = netdev_priv(netdev);
2953
2954 if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
2955 return -EINVAL;
2956
2957 mutex_lock(&bnad->conf_mutex);
2958
2959 netdev->mtu = new_mtu;
2960
078086f3
RM
2961 mtu = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN;
2962 err = bnad_mtu_set(bnad, mtu);
2963 if (err)
2964 err = -EBUSY;
8b230ed8
RM
2965
2966 mutex_unlock(&bnad->conf_mutex);
2967 return err;
2968}
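/*
 * The value passed down is the full on-wire frame size, e.g. for the
 * default MTU of 1500: ETH_HLEN (14) + VLAN_HLEN (4) + 1500 +
 * ETH_FCS_LEN (4) = 1522 bytes.
 */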
2969
8b230ed8
RM
2970static void
2971bnad_vlan_rx_add_vid(struct net_device *netdev,
2972 unsigned short vid)
2973{
2974 struct bnad *bnad = netdev_priv(netdev);
2975 unsigned long flags;
2976
2977 if (!bnad->rx_info[0].rx)
2978 return;
2979
2980 mutex_lock(&bnad->conf_mutex);
2981
2982 spin_lock_irqsave(&bnad->bna_lock, flags);
2983 bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
f859d7cb 2984 set_bit(vid, bnad->active_vlans);
8b230ed8
RM
2985 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2986
2987 mutex_unlock(&bnad->conf_mutex);
2988}
2989
2990static void
2991bnad_vlan_rx_kill_vid(struct net_device *netdev,
2992 unsigned short vid)
2993{
2994 struct bnad *bnad = netdev_priv(netdev);
2995 unsigned long flags;
2996
2997 if (!bnad->rx_info[0].rx)
2998 return;
2999
3000 mutex_lock(&bnad->conf_mutex);
3001
3002 spin_lock_irqsave(&bnad->bna_lock, flags);
f859d7cb 3003 clear_bit(vid, bnad->active_vlans);
8b230ed8
RM
3004 bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
3005 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3006
3007 mutex_unlock(&bnad->conf_mutex);
3008}
3009
3010#ifdef CONFIG_NET_POLL_CONTROLLER
3011static void
3012bnad_netpoll(struct net_device *netdev)
3013{
3014 struct bnad *bnad = netdev_priv(netdev);
3015 struct bnad_rx_info *rx_info;
3016 struct bnad_rx_ctrl *rx_ctrl;
3017 u32 curr_mask;
3018 int i, j;
3019
3020 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
3021 bna_intx_disable(&bnad->bna, curr_mask);
3022 bnad_isr(bnad->pcidev->irq, netdev);
3023 bna_intx_enable(&bnad->bna, curr_mask);
3024 } else {
19dbff9f
RM
3025 /*
3026 * Tx processing may happen in sending context, so no need
3027 * to explicitly process completions here
3028 */
3029
3030 /* Rx processing */
8b230ed8
RM
3031 for (i = 0; i < bnad->num_rx; i++) {
3032 rx_info = &bnad->rx_info[i];
3033 if (!rx_info->rx)
3034 continue;
3035 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
3036 rx_ctrl = &rx_info->rx_ctrl[j];
271e8b79 3037 if (rx_ctrl->ccb)
8b230ed8
RM
3038 bnad_netif_rx_schedule_poll(bnad,
3039 rx_ctrl->ccb);
8b230ed8
RM
3040 }
3041 }
3042 }
3043}
3044#endif
3045
3046static const struct net_device_ops bnad_netdev_ops = {
3047 .ndo_open = bnad_open,
3048 .ndo_stop = bnad_stop,
3049 .ndo_start_xmit = bnad_start_xmit,
250e061e 3050 .ndo_get_stats64 = bnad_get_stats64,
8b230ed8 3051 .ndo_set_rx_mode = bnad_set_rx_mode,
8b230ed8
RM
3052 .ndo_validate_addr = eth_validate_addr,
3053 .ndo_set_mac_address = bnad_set_mac_address,
3054 .ndo_change_mtu = bnad_change_mtu,
8b230ed8
RM
3055 .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
3056 .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
3057#ifdef CONFIG_NET_POLL_CONTROLLER
3058 .ndo_poll_controller = bnad_netpoll
3059#endif
3060};
3061
3062static void
3063bnad_netdev_init(struct bnad *bnad, bool using_dac)
3064{
3065 struct net_device *netdev = bnad->netdev;
3066
e5ee20e7
MM
3067 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3068 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3069 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX;
8b230ed8 3070
e5ee20e7
MM
3071 netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
3072 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3073 NETIF_F_TSO | NETIF_F_TSO6;
8b230ed8 3074
e5ee20e7
MM
3075 netdev->features |= netdev->hw_features |
3076 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
8b230ed8
RM
3077
3078 if (using_dac)
3079 netdev->features |= NETIF_F_HIGHDMA;
3080
8b230ed8
RM
3081 netdev->mem_start = bnad->mmio_start;
3082 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
3083
3084 netdev->netdev_ops = &bnad_netdev_ops;
3085 bnad_set_ethtool_ops(netdev);
3086}
3087
3088/*
3089 * 1. Initialize the bnad structure
3090 * 2. Set up netdev pointer in pci_dev
3091 * 3. Initialize Tx free tasklet
3092 * 4. Initialize no. of TxQs & CQs & MSI-X vectors
3093 */
3094static int
3095bnad_init(struct bnad *bnad,
3096 struct pci_dev *pdev, struct net_device *netdev)
3097{
3098 unsigned long flags;
3099
3100 SET_NETDEV_DEV(netdev, &pdev->dev);
3101 pci_set_drvdata(pdev, netdev);
3102
3103 bnad->netdev = netdev;
3104 bnad->pcidev = pdev;
3105 bnad->mmio_start = pci_resource_start(pdev, 0);
3106 bnad->mmio_len = pci_resource_len(pdev, 0);
3107 bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
3108 if (!bnad->bar0) {
3109 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
3110 pci_set_drvdata(pdev, NULL);
3111 return -ENOMEM;
3112 }
3113 pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
3114 (unsigned long long) bnad->mmio_len);
3115
3116 spin_lock_irqsave(&bnad->bna_lock, flags);
3117 if (!bnad_msix_disable)
3118 bnad->cfg_flags = BNAD_CF_MSIX;
3119
3120 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
3121
3122 bnad_q_num_init(bnad);
3123 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3124
3125 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
3126 (bnad->num_rx * bnad->num_rxp_per_rx) +
3127 BNAD_MAILBOX_MSIX_VECTORS;
8b230ed8
RM
3128
3129 bnad->txq_depth = BNAD_TXQ_DEPTH;
3130 bnad->rxq_depth = BNAD_RXQ_DEPTH;
8b230ed8
RM
3131
3132 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3133 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3134
3135 tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
3136 (unsigned long)bnad);
3137
3138 return 0;
3139}
3140
3141/*
3142 * Must be called after bnad_pci_uninit()
3143 * so that iounmap() and pci_set_drvdata(NULL)
3144 * happen only after PCI uninitialization.
3145 */
3146static void
3147bnad_uninit(struct bnad *bnad)
3148{
3149 if (bnad->bar0)
3150 iounmap(bnad->bar0);
3151 pci_set_drvdata(bnad->pcidev, NULL);
3152}
3153
3154/*
3155 * Initialize locks
078086f3 3156 a) Per-ioceth mutex used for serializing configuration
8b230ed8
RM
3157 changes from OS interface
3158 b) spin lock used to protect bna state machine
3159 */
3160static void
3161bnad_lock_init(struct bnad *bnad)
3162{
3163 spin_lock_init(&bnad->bna_lock);
3164 mutex_init(&bnad->conf_mutex);
3165}
3166
3167static void
3168bnad_lock_uninit(struct bnad *bnad)
3169{
3170 mutex_destroy(&bnad->conf_mutex);
3171}
3172
3173/* PCI Initialization */
3174static int
3175bnad_pci_init(struct bnad *bnad,
3176 struct pci_dev *pdev, bool *using_dac)
3177{
3178 int err;
3179
3180 err = pci_enable_device(pdev);
3181 if (err)
3182 return err;
3183 err = pci_request_regions(pdev, BNAD_NAME);
3184 if (err)
3185 goto disable_device;
5ea74318
IV
3186 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
3187 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
8b230ed8
RM
3188 *using_dac = 1;
3189 } else {
5ea74318 3190 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
8b230ed8 3191 if (err) {
5ea74318
IV
3192 err = dma_set_coherent_mask(&pdev->dev,
3193 DMA_BIT_MASK(32));
8b230ed8
RM
3194 if (err)
3195 goto release_regions;
3196 }
3197 *using_dac = 0;
3198 }
3199 pci_set_master(pdev);
3200 return 0;
3201
3202release_regions:
3203 pci_release_regions(pdev);
3204disable_device:
3205 pci_disable_device(pdev);
3206
3207 return err;
3208}
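/*
 * DMA setup above: try 64-bit streaming and coherent masks first and
 * report using_dac = 1 on success (which later enables NETIF_F_HIGHDMA);
 * otherwise fall back to 32-bit masks before giving up.
 */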
3209
3210static void
3211bnad_pci_uninit(struct pci_dev *pdev)
3212{
3213 pci_release_regions(pdev);
3214 pci_disable_device(pdev);
3215}
3216
3217static int __devinit
3218bnad_pci_probe(struct pci_dev *pdev,
3219 const struct pci_device_id *pcidev_id)
3220{
3caa1e95 3221 bool using_dac;
0120b99c 3222 int err;
8b230ed8
RM
3223 struct bnad *bnad;
3224 struct bna *bna;
3225 struct net_device *netdev;
3226 struct bfa_pcidev pcidev_info;
3227 unsigned long flags;
3228
3229 pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
3230 pdev, pcidev_id, PCI_FUNC(pdev->devfn));
3231
3232 mutex_lock(&bnad_fwimg_mutex);
3233 if (!cna_get_firmware_buf(pdev)) {
3234 mutex_unlock(&bnad_fwimg_mutex);
3235 pr_warn("Failed to load Firmware Image!\n");
3236 return -ENODEV;
3237 }
3238 mutex_unlock(&bnad_fwimg_mutex);
3239
3240 /*
3241 * Allocates a net_device with sizeof(struct bnad) of private data
3242 * bnad = netdev_priv(netdev)
3243 */
3244 netdev = alloc_etherdev(sizeof(struct bnad));
3245 if (!netdev) {
078086f3 3246 dev_err(&pdev->dev, "netdev allocation failed\n");
8b230ed8
RM
3247 err = -ENOMEM;
3248 return err;
3249 }
3250 bnad = netdev_priv(netdev);
3251
078086f3
RM
3252 bnad_lock_init(bnad);
3253
3254 mutex_lock(&bnad->conf_mutex);
8b230ed8
RM
3255 /*
3256 * PCI initialization
0120b99c 3257 * Output : using_dac = 1 for 64 bit DMA
be7fa326 3258 * = 0 for 32 bit DMA
8b230ed8
RM
3259 */
3260 err = bnad_pci_init(bnad, pdev, &using_dac);
3261 if (err)
44861f44 3262 goto unlock_mutex;
8b230ed8 3263
8b230ed8
RM
3264 /*
3265 * Initialize bnad structure
3266 * Setup relation between pci_dev & netdev
3267 * Init Tx free tasklet
3268 */
3269 err = bnad_init(bnad, pdev, netdev);
3270 if (err)
3271 goto pci_uninit;
078086f3 3272
8b230ed8
RM
3273 /* Initialize netdev structure, set up ethtool ops */
3274 bnad_netdev_init(bnad, using_dac);
3275
815f41e7
RM
3276 /* Set link to down state */
3277 netif_carrier_off(netdev);
3278
8b230ed8 3279 /* Get resource requirement from bna */
078086f3 3280 spin_lock_irqsave(&bnad->bna_lock, flags);
8b230ed8 3281 bna_res_req(&bnad->res_info[0]);
078086f3 3282 spin_unlock_irqrestore(&bnad->bna_lock, flags);
8b230ed8
RM
3283
3284 /* Allocate resources from bna */
078086f3 3285 err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
8b230ed8 3286 if (err)
078086f3 3287 goto drv_uninit;
8b230ed8
RM
3288
3289 bna = &bnad->bna;
3290
3291 /* Setup pcidev_info for bna_init() */
3292 pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3293 pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3294 pcidev_info.device_id = bnad->pcidev->device;
3295 pcidev_info.pci_bar_kva = bnad->bar0;
3296
8b230ed8
RM
3297 spin_lock_irqsave(&bnad->bna_lock, flags);
3298 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
8b230ed8
RM
3299 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3300
3301 bnad->stats.bna_stats = &bna->stats;
3302
078086f3
RM
3303 bnad_enable_msix(bnad);
3304 err = bnad_mbox_irq_alloc(bnad);
3305 if (err)
3306 goto res_free;
3307
3308
8b230ed8 3309 /* Set up timers */
078086f3 3310 setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
8b230ed8 3311 ((unsigned long)bnad));
078086f3 3312 setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
8b230ed8 3313 ((unsigned long)bnad));
078086f3 3314 setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
1d32f769 3315 ((unsigned long)bnad));
078086f3 3316 setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
8b230ed8
RM
3317 ((unsigned long)bnad));
3318
3319 /* Now start the timer before calling IOC */
078086f3 3320 mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
8b230ed8
RM
3321 jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
3322
3323 /*
3324 * Start the chip
078086f3
RM
3325 * If the callback comes back with an error, we bail out.
3326 * This is a catastrophic error.
8b230ed8 3327 */
078086f3
RM
3328 err = bnad_ioceth_enable(bnad);
3329 if (err) {
3330 pr_err("BNA: Initialization failed err=%d\n",
3331 err);
3332 goto probe_success;
3333 }
3334
3335 spin_lock_irqsave(&bnad->bna_lock, flags);
3336 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3337 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
3338 bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
3339 bna_attr(bna)->num_rxp - 1);
3340 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3341 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
3342 err = -EIO;
3343 }
3caa1e95
RM
3344 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3345 if (err)
3346 goto disable_ioceth;
3347
3348 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3
RM
3349 bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
3350 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3351
3352 err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
0caa9aae
RM
3353 if (err) {
3354 err = -EIO;
078086f3 3355 goto disable_ioceth;
0caa9aae 3356 }
078086f3
RM
3357
3358 spin_lock_irqsave(&bnad->bna_lock, flags);
3359 bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
3360 spin_unlock_irqrestore(&bnad->bna_lock, flags);
8b230ed8
RM
3361
3362 /* Get the burnt-in mac */
3363 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3 3364 bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr);
8b230ed8
RM
3365 bnad_set_netdev_perm_addr(bnad);
3366 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3367
0caa9aae
RM
3368 mutex_unlock(&bnad->conf_mutex);
3369
8b230ed8
RM
3370 /* Finally, register with the net_device layer */
3371 err = register_netdev(netdev);
3372 if (err) {
3373 pr_err("BNA : Registering with netdev failed\n");
078086f3 3374 goto probe_uninit;
8b230ed8 3375 }
078086f3 3376 set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
8b230ed8 3377
0caa9aae
RM
3378 return 0;
3379
078086f3
RM
3380probe_success:
3381 mutex_unlock(&bnad->conf_mutex);
8b230ed8
RM
3382 return 0;
3383
078086f3 3384probe_uninit:
3fc72370 3385 mutex_lock(&bnad->conf_mutex);
078086f3
RM
3386 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3387disable_ioceth:
3388 bnad_ioceth_disable(bnad);
3389 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3390 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3391 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
8b230ed8
RM
3392 spin_lock_irqsave(&bnad->bna_lock, flags);
3393 bna_uninit(bna);
3394 spin_unlock_irqrestore(&bnad->bna_lock, flags);
078086f3 3395 bnad_mbox_irq_free(bnad);
8b230ed8 3396 bnad_disable_msix(bnad);
078086f3
RM
3397res_free:
3398 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3399drv_uninit:
3400 bnad_uninit(bnad);
8b230ed8
RM
3401pci_uninit:
3402 bnad_pci_uninit(pdev);
44861f44 3403unlock_mutex:
078086f3 3404 mutex_unlock(&bnad->conf_mutex);
8b230ed8 3405 bnad_lock_uninit(bnad);
8b230ed8
RM
3406 free_netdev(netdev);
3407 return err;
3408}
3409
3410static void __devexit
3411bnad_pci_remove(struct pci_dev *pdev)
3412{
3413 struct net_device *netdev = pci_get_drvdata(pdev);
3414 struct bnad *bnad;
3415 struct bna *bna;
3416 unsigned long flags;
3417
3418 if (!netdev)
3419 return;
3420
3421 pr_info("%s bnad_pci_remove\n", netdev->name);
3422 bnad = netdev_priv(netdev);
3423 bna = &bnad->bna;
3424
078086f3
RM
3425 if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
3426 unregister_netdev(netdev);
8b230ed8
RM
3427
3428 mutex_lock(&bnad->conf_mutex);
078086f3
RM
3429 bnad_ioceth_disable(bnad);
3430 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3431 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3432 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
8b230ed8
RM
3433 spin_lock_irqsave(&bnad->bna_lock, flags);
3434 bna_uninit(bna);
3435 spin_unlock_irqrestore(&bnad->bna_lock, flags);
8b230ed8 3436
078086f3
RM
3437 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3438 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3439 bnad_mbox_irq_free(bnad);
8b230ed8
RM
3440 bnad_disable_msix(bnad);
3441 bnad_pci_uninit(pdev);
078086f3 3442 mutex_unlock(&bnad->conf_mutex);
8b230ed8
RM
3443 bnad_lock_uninit(bnad);
3444 bnad_uninit(bnad);
3445 free_netdev(netdev);
3446}
3447
0120b99c 3448static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = {
8b230ed8
RM
3449 {
3450 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3451 PCI_DEVICE_ID_BROCADE_CT),
3452 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3453 .class_mask = 0xffff00
586b2816
RM
3454 },
3455 {
3456 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3457 BFA_PCI_DEVICE_ID_CT2),
3458 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3459 .class_mask = 0xffff00
3460 },
3461 {0, },
8b230ed8
RM
3462};
3463
3464MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3465
3466static struct pci_driver bnad_pci_driver = {
3467 .name = BNAD_NAME,
3468 .id_table = bnad_pci_id_table,
3469 .probe = bnad_pci_probe,
3470 .remove = __devexit_p(bnad_pci_remove),
3471};
3472
3473static int __init
3474bnad_module_init(void)
3475{
3476 int err;
3477
5aad0011
RM
3478 pr_info("Brocade 10G Ethernet driver - version: %s\n",
3479 BNAD_VERSION);
8b230ed8 3480
8a891429 3481 bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
8b230ed8
RM
3482
3483 err = pci_register_driver(&bnad_pci_driver);
3484 if (err < 0) {
3485 pr_err("bna : PCI registration failed in module init "
3486 "(%d)\n", err);
3487 return err;
3488 }
3489
3490 return 0;
3491}
3492
3493static void __exit
3494bnad_module_exit(void)
3495{
3496 pci_unregister_driver(&bnad_pci_driver);
3497
3498 if (bfi_fw)
3499 release_firmware(bfi_fw);
3500}
3501
3502module_init(bnad_module_init);
3503module_exit(bnad_module_exit);
3504
3505MODULE_AUTHOR("Brocade");
3506MODULE_LICENSE("GPL");
3507MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
3508MODULE_VERSION(BNAD_VERSION);
3509MODULE_FIRMWARE(CNA_FW_FILE_CT);
1bf9fd70 3510MODULE_FIRMWARE(CNA_FW_FILE_CT2);