drivers/net/ethernet/brocade/bna/bnad.c
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18#include <linux/bitops.h>
19#include <linux/netdevice.h>
20#include <linux/skbuff.h>
21#include <linux/etherdevice.h>
22#include <linux/in.h>
23#include <linux/ethtool.h>
24#include <linux/if_vlan.h>
25#include <linux/if_ether.h>
26#include <linux/ip.h>
27#include <linux/prefetch.h>
28#include <linux/module.h>
29
30#include "bnad.h"
31#include "bna.h"
32#include "cna.h"
33
34static DEFINE_MUTEX(bnad_fwimg_mutex);
35
36/*
37 * Module params
38 */
39static uint bnad_msix_disable;
40module_param(bnad_msix_disable, uint, 0444);
41MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
42
43static uint bnad_ioc_auto_recover = 1;
44module_param(bnad_ioc_auto_recover, uint, 0444);
45MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
46
47static uint bna_debugfs_enable = 1;
48module_param(bna_debugfs_enable, uint, S_IRUGO | S_IWUSR);
49MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
50 " Range[false:0|true:1]");
51
52/*
53 * Global variables
54 */
55u32 bnad_rxqs_per_cq = 2;
56static u32 bna_id;
57static struct mutex bnad_list_mutex;
58static LIST_HEAD(bnad_list);
59static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
60
61/*
62 * Local MACROS
63 */
64#define BNAD_TX_UNMAPQ_DEPTH (bnad->txq_depth * 2)
65
66#define BNAD_RX_UNMAPQ_DEPTH (bnad->rxq_depth)
67
68#define BNAD_GET_MBOX_IRQ(_bnad) \
69 (((_bnad)->cfg_flags & BNAD_CF_MSIX) ? \
70 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
71 ((_bnad)->pcidev->irq))
72
73#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth) \
74do { \
75 (_res_info)->res_type = BNA_RES_T_MEM; \
76 (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA; \
77 (_res_info)->res_u.mem_info.num = (_num); \
78 (_res_info)->res_u.mem_info.len = \
79 sizeof(struct bnad_unmap_q) + \
80 (sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \
81} while (0)
82
83static void
84bnad_add_to_list(struct bnad *bnad)
85{
86 mutex_lock(&bnad_list_mutex);
87 list_add_tail(&bnad->list_entry, &bnad_list);
88 bnad->id = bna_id++;
89 mutex_unlock(&bnad_list_mutex);
90}
91
92static void
93bnad_remove_from_list(struct bnad *bnad)
94{
95 mutex_lock(&bnad_list_mutex);
96 list_del(&bnad->list_entry);
97 mutex_unlock(&bnad_list_mutex);
98}
99
100/*
101 * Reinitialize completions in CQ, once Rx is taken down
102 */
103static void
104bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
105{
106 struct bna_cq_entry *cmpl, *next_cmpl;
107 unsigned int wi_range, wis = 0, ccb_prod = 0;
108 int i;
109
110 BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
111 wi_range);
112
113 for (i = 0; i < ccb->q_depth; i++) {
114 wis++;
115 if (likely(--wi_range))
116 next_cmpl = cmpl + 1;
117 else {
118 BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
119 wis = 0;
120 BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
121 next_cmpl, wi_range);
122 }
123 cmpl->valid = 0;
124 cmpl = next_cmpl;
125 }
126}
127
128static u32
129bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array,
130 u32 index, u32 depth, struct sk_buff *skb, u32 frag)
131{
132 int j;
133 array[index].skb = NULL;
134
135 dma_unmap_single(pdev, dma_unmap_addr(&array[index], dma_addr),
136 skb_headlen(skb), DMA_TO_DEVICE);
137 dma_unmap_addr_set(&array[index], dma_addr, 0);
138 BNA_QE_INDX_ADD(index, 1, depth);
139
140 for (j = 0; j < frag; j++) {
141 dma_unmap_page(pdev, dma_unmap_addr(&array[index], dma_addr),
142 skb_frag_size(&skb_shinfo(skb)->frags[j]),
143 DMA_TO_DEVICE);
144 dma_unmap_addr_set(&array[index], dma_addr, 0);
145 BNA_QE_INDX_ADD(index, 1, depth);
146 }
147
148 return index;
149}
150
151/*
152 * Frees all pending Tx Bufs
153 * At this point no activity is expected on the Q,
154 * so DMA unmap & freeing is fine.
155 */
156static void
157bnad_free_all_txbufs(struct bnad *bnad,
158 struct bna_tcb *tcb)
159{
160 u32 unmap_cons;
161 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
162 struct bnad_skb_unmap *unmap_array;
163 struct sk_buff *skb = NULL;
164 int q;
165
166 unmap_array = unmap_q->unmap_array;
167
168 for (q = 0; q < unmap_q->q_depth; q++) {
169 skb = unmap_array[q].skb;
170 if (!skb)
171 continue;
172
173 unmap_cons = q;
174 unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
175 unmap_cons, unmap_q->q_depth, skb,
176 skb_shinfo(skb)->nr_frags);
177
178 dev_kfree_skb_any(skb);
179 }
180}
181
182/* Data Path Handlers */
183
184/*
185 * bnad_free_txbufs : Frees the Tx bufs on Tx completion
186 * Can be called in a) Interrupt context
187 * b) Sending context
188 */
189static u32
190bnad_free_txbufs(struct bnad *bnad,
191 struct bna_tcb *tcb)
192{
193 u32 unmap_cons, sent_packets = 0, sent_bytes = 0;
194 u16 wis, updated_hw_cons;
195 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
196 struct bnad_skb_unmap *unmap_array;
197 struct sk_buff *skb;
198
199 /* Just return if TX is stopped */
200 if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
201 return 0;
202
203 updated_hw_cons = *(tcb->hw_consumer_index);
204
205 wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
206 updated_hw_cons, tcb->q_depth);
207
208 BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
209
210 unmap_array = unmap_q->unmap_array;
211 unmap_cons = unmap_q->consumer_index;
212
213 prefetch(&unmap_array[unmap_cons + 1]);
214 while (wis) {
215 skb = unmap_array[unmap_cons].skb;
216
217 sent_packets++;
218 sent_bytes += skb->len;
219 wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
220
221 unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
222 unmap_cons, unmap_q->q_depth, skb,
223 skb_shinfo(skb)->nr_frags);
224
225 dev_kfree_skb_any(skb);
226 }
227
228 /* Update consumer pointers. */
229 tcb->consumer_index = updated_hw_cons;
230 unmap_q->consumer_index = unmap_cons;
231
232 tcb->txq->tx_packets += sent_packets;
233 tcb->txq->tx_bytes += sent_bytes;
234
235 return sent_packets;
236}
237
238static u32
239bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
240{
241 struct net_device *netdev = bnad->netdev;
242 u32 sent = 0;
243
244 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
245 return 0;
246
247 sent = bnad_free_txbufs(bnad, tcb);
248 if (sent) {
249 if (netif_queue_stopped(netdev) &&
250 netif_carrier_ok(netdev) &&
251 BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
252 BNAD_NETIF_WAKE_THRESHOLD) {
253 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
254 netif_wake_queue(netdev);
255 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
256 }
257 }
258 }
259
260 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
261 bna_ib_ack(tcb->i_dbell, sent);
262
263 smp_mb__before_clear_bit();
264 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
265
266 return sent;
267}
268
269/* MSIX Tx Completion Handler */
270static irqreturn_t
271bnad_msix_tx(int irq, void *data)
272{
273 struct bna_tcb *tcb = (struct bna_tcb *)data;
274 struct bnad *bnad = tcb->bnad;
275
276 bnad_tx(bnad, tcb);
277
278 return IRQ_HANDLED;
279}
280
281static void
282bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
283{
284 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
285
286 rcb->producer_index = 0;
287 rcb->consumer_index = 0;
288
289 unmap_q->producer_index = 0;
290 unmap_q->consumer_index = 0;
291}
292
293static void
294bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
295{
296 struct bnad_unmap_q *unmap_q;
297 struct bnad_skb_unmap *unmap_array;
298 struct sk_buff *skb;
299 int unmap_cons;
300
301 unmap_q = rcb->unmap_q;
302 unmap_array = unmap_q->unmap_array;
303 for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
304 skb = unmap_array[unmap_cons].skb;
305 if (!skb)
306 continue;
307 unmap_array[unmap_cons].skb = NULL;
308 dma_unmap_single(&bnad->pcidev->dev,
309 dma_unmap_addr(&unmap_array[unmap_cons],
310 dma_addr),
311 rcb->rxq->buffer_size,
312 DMA_FROM_DEVICE);
313 dev_kfree_skb(skb);
314 }
315 bnad_reset_rcb(bnad, rcb);
316}
317
318static void
319bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
320{
321 u16 to_alloc, alloced, unmap_prod, wi_range;
322 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
323 struct bnad_skb_unmap *unmap_array;
324 struct bna_rxq_entry *rxent;
325 struct sk_buff *skb;
326 dma_addr_t dma_addr;
327
328 alloced = 0;
329 to_alloc =
330 BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);
331
332 unmap_array = unmap_q->unmap_array;
333 unmap_prod = unmap_q->producer_index;
334
335 BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);
336
337 while (to_alloc--) {
338 if (!wi_range)
339 BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
340 wi_range);
341 skb = netdev_alloc_skb_ip_align(bnad->netdev,
342 rcb->rxq->buffer_size);
343 if (unlikely(!skb)) {
344 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
345 rcb->rxq->rxbuf_alloc_failed++;
346 goto finishing;
347 }
348 unmap_array[unmap_prod].skb = skb;
349 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
350 rcb->rxq->buffer_size,
351 DMA_FROM_DEVICE);
352 dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
353 dma_addr);
354 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
355 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
356
357 rxent++;
358 wi_range--;
359 alloced++;
360 }
361
362finishing:
363 if (likely(alloced)) {
364 unmap_q->producer_index = unmap_prod;
365 rcb->producer_index = unmap_prod;
366 smp_mb();
367 if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
368 bna_rxq_prod_indx_doorbell(rcb);
369 }
370}
371
372static inline void
373bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
374{
375 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
376
377 if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
378 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
379 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
380 bnad_alloc_n_post_rxbufs(bnad, rcb);
381 smp_mb__before_clear_bit();
382 clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
383 }
384}
385
386static u32
387bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
388{
389 struct bna_cq_entry *cmpl, *next_cmpl;
390 struct bna_rcb *rcb = NULL;
391 unsigned int wi_range, packets = 0, wis = 0;
392 struct bnad_unmap_q *unmap_q;
393 struct bnad_skb_unmap *unmap_array;
394 struct sk_buff *skb;
395 u32 flags, unmap_cons;
396 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
397 struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
398
399 if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))
400 return 0;
401
402 prefetch(bnad->netdev);
403 BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
404 wi_range);
405 BUG_ON(!(wi_range <= ccb->q_depth));
406 while (cmpl->valid && packets < budget) {
407 packets++;
408 BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
409
410 if (bna_is_small_rxq(cmpl->rxq_id))
411 rcb = ccb->rcb[1];
412 else
413 rcb = ccb->rcb[0];
414
415 unmap_q = rcb->unmap_q;
416 unmap_array = unmap_q->unmap_array;
417 unmap_cons = unmap_q->consumer_index;
418
419 skb = unmap_array[unmap_cons].skb;
420 BUG_ON(!(skb));
421 unmap_array[unmap_cons].skb = NULL;
422 dma_unmap_single(&bnad->pcidev->dev,
423 dma_unmap_addr(&unmap_array[unmap_cons],
424 dma_addr),
425 rcb->rxq->buffer_size,
426 DMA_FROM_DEVICE);
427 BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
428
429 /* Should be more efficient ? Performance ? */
430 BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
431
432 wis++;
433 if (likely(--wi_range))
434 next_cmpl = cmpl + 1;
435 else {
436 BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
437 wis = 0;
438 BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
439 next_cmpl, wi_range);
440 BUG_ON(!(wi_range <= ccb->q_depth));
441 }
442 prefetch(next_cmpl);
443
444 flags = ntohl(cmpl->flags);
445 if (unlikely
446 (flags &
447 (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
448 BNA_CQ_EF_TOO_LONG))) {
449 dev_kfree_skb_any(skb);
450 rcb->rxq->rx_packets_with_error++;
451 goto next;
452 }
453
454 skb_put(skb, ntohs(cmpl->length));
455 if (likely
456 ((bnad->netdev->features & NETIF_F_RXCSUM) &&
457 (((flags & BNA_CQ_EF_IPV4) &&
458 (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
459 (flags & BNA_CQ_EF_IPV6)) &&
460 (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
461 (flags & BNA_CQ_EF_L4_CKSUM_OK)))
462 skb->ip_summed = CHECKSUM_UNNECESSARY;
463 else
464 skb_checksum_none_assert(skb);
465
466 rcb->rxq->rx_packets++;
467 rcb->rxq->rx_bytes += skb->len;
468 skb->protocol = eth_type_trans(skb, bnad->netdev);
469
470 if (flags & BNA_CQ_EF_VLAN)
471 __vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag));
472
473 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
474 napi_gro_receive(&rx_ctrl->napi, skb);
475 else
476 netif_receive_skb(skb);
477
478next:
479 cmpl->valid = 0;
480 cmpl = next_cmpl;
481 }
482
483 BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
484
485 if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
486 bna_ib_ack_disable_irq(ccb->i_dbell, packets);
487
488 bnad_refill_rxq(bnad, ccb->rcb[0]);
489 if (ccb->rcb[1])
490 bnad_refill_rxq(bnad, ccb->rcb[1]);
491
492 clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
493
494 return packets;
495}
496
497static void
498bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
499{
500 struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
501 struct napi_struct *napi = &rx_ctrl->napi;
502
503 if (likely(napi_schedule_prep(napi))) {
504 __napi_schedule(napi);
505 rx_ctrl->rx_schedule++;
506 }
507}
508
509/* MSIX Rx Path Handler */
510static irqreturn_t
511bnad_msix_rx(int irq, void *data)
512{
513 struct bna_ccb *ccb = (struct bna_ccb *)data;
514
515 if (ccb) {
516 ((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++;
517 bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
518 }
519
520 return IRQ_HANDLED;
521}
522
523/* Interrupt handlers */
524
525/* Mbox Interrupt Handlers */
526static irqreturn_t
527bnad_msix_mbox_handler(int irq, void *data)
528{
529 u32 intr_status;
530 unsigned long flags;
531 struct bnad *bnad = (struct bnad *)data;
532
533 spin_lock_irqsave(&bnad->bna_lock, flags);
534 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
535 spin_unlock_irqrestore(&bnad->bna_lock, flags);
536 return IRQ_HANDLED;
537 }
538
539 bna_intr_status_get(&bnad->bna, intr_status);
540
541 if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
542 bna_mbox_handler(&bnad->bna, intr_status);
543
544 spin_unlock_irqrestore(&bnad->bna_lock, flags);
545
546 return IRQ_HANDLED;
547}
548
549static irqreturn_t
550bnad_isr(int irq, void *data)
551{
552 int i, j;
553 u32 intr_status;
554 unsigned long flags;
555 struct bnad *bnad = (struct bnad *)data;
556 struct bnad_rx_info *rx_info;
557 struct bnad_rx_ctrl *rx_ctrl;
558 struct bna_tcb *tcb = NULL;
559
560 spin_lock_irqsave(&bnad->bna_lock, flags);
561 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
562 spin_unlock_irqrestore(&bnad->bna_lock, flags);
563 return IRQ_NONE;
564 }
565
566 bna_intr_status_get(&bnad->bna, intr_status);
567
568 if (unlikely(!intr_status)) {
569 spin_unlock_irqrestore(&bnad->bna_lock, flags);
570 return IRQ_NONE;
571 }
572
573 if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
574 bna_mbox_handler(&bnad->bna, intr_status);
575
576 spin_unlock_irqrestore(&bnad->bna_lock, flags);
577
578 if (!BNA_IS_INTX_DATA_INTR(intr_status))
579 return IRQ_HANDLED;
580
581 /* Process data interrupts */
582 /* Tx processing */
583 for (i = 0; i < bnad->num_tx; i++) {
584 for (j = 0; j < bnad->num_txq_per_tx; j++) {
585 tcb = bnad->tx_info[i].tcb[j];
586 if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
587 bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
588 }
589 }
590 /* Rx processing */
591 for (i = 0; i < bnad->num_rx; i++) {
592 rx_info = &bnad->rx_info[i];
593 if (!rx_info->rx)
594 continue;
595 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
596 rx_ctrl = &rx_info->rx_ctrl[j];
597 if (rx_ctrl->ccb)
598 bnad_netif_rx_schedule_poll(bnad,
599 rx_ctrl->ccb);
600 }
601 }
602 return IRQ_HANDLED;
603}
604
605/*
606 * Called in interrupt / callback context
607 * with bna_lock held, so cfg_flags access is OK
608 */
609static void
610bnad_enable_mbox_irq(struct bnad *bnad)
611{
612 clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
613
614 BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
615}
616
617/*
618 * Called with bnad->bna_lock held b'cos of
619 * bnad->cfg_flags access.
620 */
621static void
622bnad_disable_mbox_irq(struct bnad *bnad)
623{
624 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
625
626 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
627}
628
629static void
630bnad_set_netdev_perm_addr(struct bnad *bnad)
631{
632 struct net_device *netdev = bnad->netdev;
633
634 memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
635 if (is_zero_ether_addr(netdev->dev_addr))
636 memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
637}
638
639/* Control Path Handlers */
640
641/* Callbacks */
642void
643bnad_cb_mbox_intr_enable(struct bnad *bnad)
644{
645 bnad_enable_mbox_irq(bnad);
646}
647
648void
649bnad_cb_mbox_intr_disable(struct bnad *bnad)
650{
651 bnad_disable_mbox_irq(bnad);
652}
653
654void
655bnad_cb_ioceth_ready(struct bnad *bnad)
656{
657 bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
658 complete(&bnad->bnad_completions.ioc_comp);
659}
660
661void
662bnad_cb_ioceth_failed(struct bnad *bnad)
663{
664 bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
665 complete(&bnad->bnad_completions.ioc_comp);
666}
667
668void
669bnad_cb_ioceth_disabled(struct bnad *bnad)
670{
671 bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
672 complete(&bnad->bnad_completions.ioc_comp);
673}
674
675static void
676bnad_cb_enet_disabled(void *arg)
677{
678 struct bnad *bnad = (struct bnad *)arg;
679
680 netif_carrier_off(bnad->netdev);
681 complete(&bnad->bnad_completions.enet_comp);
682}
683
684void
685bnad_cb_ethport_link_status(struct bnad *bnad,
686 enum bna_link_status link_status)
687{
688 bool link_up = false;
689
690 link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
691
692 if (link_status == BNA_CEE_UP) {
693 if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
694 BNAD_UPDATE_CTR(bnad, cee_toggle);
695 set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
696 } else {
697 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
698 BNAD_UPDATE_CTR(bnad, cee_toggle);
699 clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
700 }
701
702 if (link_up) {
703 if (!netif_carrier_ok(bnad->netdev)) {
704 uint tx_id, tcb_id;
705 printk(KERN_WARNING "bna: %s link up\n",
706 bnad->netdev->name);
707 netif_carrier_on(bnad->netdev);
708 BNAD_UPDATE_CTR(bnad, link_toggle);
709 for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
710 for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
711 tcb_id++) {
712 struct bna_tcb *tcb =
713 bnad->tx_info[tx_id].tcb[tcb_id];
714 u32 txq_id;
715 if (!tcb)
716 continue;
717
718 txq_id = tcb->id;
719
720 if (test_bit(BNAD_TXQ_TX_STARTED,
721 &tcb->flags)) {
722 /*
723 * Force an immediate
724 * Transmit Schedule */
725 printk(KERN_INFO "bna: %s %d "
726 "TXQ_STARTED\n",
727 bnad->netdev->name,
728 txq_id);
729 netif_wake_subqueue(
730 bnad->netdev,
731 txq_id);
732 BNAD_UPDATE_CTR(bnad,
733 netif_queue_wakeup);
734 } else {
735 netif_stop_subqueue(
736 bnad->netdev,
737 txq_id);
738 BNAD_UPDATE_CTR(bnad,
739 netif_queue_stop);
740 }
741 }
742 }
743 }
744 } else {
745 if (netif_carrier_ok(bnad->netdev)) {
746 printk(KERN_WARNING "bna: %s link down\n",
747 bnad->netdev->name);
748 netif_carrier_off(bnad->netdev);
749 BNAD_UPDATE_CTR(bnad, link_toggle);
750 }
751 }
752}
753
754static void
755bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
756{
757 struct bnad *bnad = (struct bnad *)arg;
758
759 complete(&bnad->bnad_completions.tx_comp);
760}
761
762static void
763bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
764{
765 struct bnad_tx_info *tx_info =
766 (struct bnad_tx_info *)tcb->txq->tx->priv;
767 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
768
769 tx_info->tcb[tcb->id] = tcb;
770 unmap_q->producer_index = 0;
771 unmap_q->consumer_index = 0;
772 unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
773}
774
775static void
776bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
777{
778 struct bnad_tx_info *tx_info =
779 (struct bnad_tx_info *)tcb->txq->tx->priv;
780
781 tx_info->tcb[tcb->id] = NULL;
782 tcb->priv = NULL;
783}
784
785static void
786bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
787{
788 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
789
790 unmap_q->producer_index = 0;
791 unmap_q->consumer_index = 0;
792 unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
793}
794
795static void
796bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
797{
798 struct bnad_rx_info *rx_info =
799 (struct bnad_rx_info *)ccb->cq->rx->priv;
800
801 rx_info->rx_ctrl[ccb->id].ccb = ccb;
802 ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
803}
804
805static void
806bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
807{
808 struct bnad_rx_info *rx_info =
809 (struct bnad_rx_info *)ccb->cq->rx->priv;
810
811 rx_info->rx_ctrl[ccb->id].ccb = NULL;
812}
813
814static void
815bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
816{
817 struct bnad_tx_info *tx_info =
818 (struct bnad_tx_info *)tx->priv;
819 struct bna_tcb *tcb;
820 u32 txq_id;
821 int i;
822
823 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
824 tcb = tx_info->tcb[i];
825 if (!tcb)
826 continue;
827 txq_id = tcb->id;
828 clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
829 netif_stop_subqueue(bnad->netdev, txq_id);
830 printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n",
831 bnad->netdev->name, txq_id);
832 }
833}
834
835static void
836bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
837{
838 struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
839 struct bna_tcb *tcb;
840 u32 txq_id;
841 int i;
842
843 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
844 tcb = tx_info->tcb[i];
845 if (!tcb)
846 continue;
847 txq_id = tcb->id;
848
849 BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
850 set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
851 BUG_ON(*(tcb->hw_consumer_index) != 0);
852
853 if (netif_carrier_ok(bnad->netdev)) {
854 printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
855 bnad->netdev->name, txq_id);
856 netif_wake_subqueue(bnad->netdev, txq_id);
857 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
858 }
859 }
860
861 /*
862 * Workaround for first ioceth enable failure & we
863 * get a 0 MAC address. We try to get the MAC address
864 * again here.
865 */
866 if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
867 bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr);
868 bnad_set_netdev_perm_addr(bnad);
869 }
870}
871
872/*
873 * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
874 */
875static void
876bnad_tx_cleanup(struct delayed_work *work)
877{
878 struct bnad_tx_info *tx_info =
879 container_of(work, struct bnad_tx_info, tx_cleanup_work);
880 struct bnad *bnad = NULL;
881 struct bnad_unmap_q *unmap_q;
882 struct bna_tcb *tcb;
883 unsigned long flags;
884 uint32_t i, pending = 0;
885
886 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
887 tcb = tx_info->tcb[i];
888 if (!tcb)
889 continue;
890
891 bnad = tcb->bnad;
892
893 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
894 pending++;
895 continue;
896 }
897
898 bnad_free_all_txbufs(bnad, tcb);
899
900 unmap_q = tcb->unmap_q;
901 unmap_q->producer_index = 0;
902 unmap_q->consumer_index = 0;
903
904 smp_mb__before_clear_bit();
905 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
906 }
907
908 if (pending) {
909 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
910 msecs_to_jiffies(1));
911 return;
912 }
913
914 spin_lock_irqsave(&bnad->bna_lock, flags);
915 bna_tx_cleanup_complete(tx_info->tx);
916 spin_unlock_irqrestore(&bnad->bna_lock, flags);
917}
918
919
920static void
921bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
922{
923 struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
924 struct bna_tcb *tcb;
925 int i;
926
927 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
928 tcb = tx_info->tcb[i];
929 if (!tcb)
930 continue;
931 }
932
933 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
934}
935
936static void
937bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
938{
939 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
940 struct bna_ccb *ccb;
941 struct bnad_rx_ctrl *rx_ctrl;
942 int i;
943
944 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
945 rx_ctrl = &rx_info->rx_ctrl[i];
946 ccb = rx_ctrl->ccb;
947 if (!ccb)
948 continue;
949
950 clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);
951
952 if (ccb->rcb[1])
953 clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
954 }
955}
956
957/*
958 * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
959 */
960static void
961bnad_rx_cleanup(void *work)
962{
963 struct bnad_rx_info *rx_info =
964 container_of(work, struct bnad_rx_info, rx_cleanup_work);
965 struct bnad_rx_ctrl *rx_ctrl;
966 struct bnad *bnad = NULL;
967 unsigned long flags;
968 uint32_t i;
969
970 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
971 rx_ctrl = &rx_info->rx_ctrl[i];
972
973 if (!rx_ctrl->ccb)
974 continue;
975
976 bnad = rx_ctrl->ccb->bnad;
977
978 /*
979 * Wait till the poll handler has exited
980 * and nothing can be scheduled anymore
981 */
982 napi_disable(&rx_ctrl->napi);
983
984 bnad_cq_cmpl_init(bnad, rx_ctrl->ccb);
985 bnad_free_all_rxbufs(bnad, rx_ctrl->ccb->rcb[0]);
986 if (rx_ctrl->ccb->rcb[1])
987 bnad_free_all_rxbufs(bnad, rx_ctrl->ccb->rcb[1]);
988 }
989
990 spin_lock_irqsave(&bnad->bna_lock, flags);
991 bna_rx_cleanup_complete(rx_info->rx);
992 spin_unlock_irqrestore(&bnad->bna_lock, flags);
993}
994
995static void
996bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
997{
998 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
999 struct bna_ccb *ccb;
1000 struct bnad_rx_ctrl *rx_ctrl;
1001 int i;
1002
1003 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1004 rx_ctrl = &rx_info->rx_ctrl[i];
1005 ccb = rx_ctrl->ccb;
1006 if (!ccb)
1007 continue;
1008
1009 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
1010
1011 if (ccb->rcb[1])
1012 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
1013 }
1014
1015 queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
1016}
1017
1018static void
1019bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
1020{
1021 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1022 struct bna_ccb *ccb;
1023 struct bna_rcb *rcb;
1024 struct bnad_rx_ctrl *rx_ctrl;
1025 struct bnad_unmap_q *unmap_q;
1026 int i;
1027 int j;
1028
1029 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1030 rx_ctrl = &rx_info->rx_ctrl[i];
1031 ccb = rx_ctrl->ccb;
1032 if (!ccb)
1033 continue;
1034
1035 napi_enable(&rx_ctrl->napi);
1036
1037 for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
1038 rcb = ccb->rcb[j];
1039 if (!rcb)
1040 continue;
1041
1042 set_bit(BNAD_RXQ_STARTED, &rcb->flags);
1043 set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
1044 unmap_q = rcb->unmap_q;
1045
1046 /* Now allocate & post buffers for this RCB */
1047 /* !!Allocation in callback context */
1048 if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
1049 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
1050 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
1051 bnad_alloc_n_post_rxbufs(bnad, rcb);
1052 smp_mb__before_clear_bit();
1053 clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
1054 }
1055 }
1056 }
1057}
1058
1059static void
1060bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
1061{
1062 struct bnad *bnad = (struct bnad *)arg;
1063
1064 complete(&bnad->bnad_completions.rx_comp);
1065}
1066
1067static void
1068bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
1069{
1070 bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
1071 complete(&bnad->bnad_completions.mcast_comp);
1072}
1073
1074void
1075bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
1076 struct bna_stats *stats)
1077{
1078 if (status == BNA_CB_SUCCESS)
1079 BNAD_UPDATE_CTR(bnad, hw_stats_updates);
1080
1081 if (!netif_running(bnad->netdev) ||
1082 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1083 return;
1084
1085 mod_timer(&bnad->stats_timer,
1086 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1087}
1088
1089static void
1090bnad_cb_enet_mtu_set(struct bnad *bnad)
1091{
1092 bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
1093 complete(&bnad->bnad_completions.mtu_comp);
1094}
1095
1096void
1097bnad_cb_completion(void *arg, enum bfa_status status)
1098{
1099 struct bnad_iocmd_comp *iocmd_comp =
1100 (struct bnad_iocmd_comp *)arg;
1101
1102 iocmd_comp->comp_status = (u32) status;
1103 complete(&iocmd_comp->comp);
1104}
1105
1106/* Resource allocation, free functions */
1107
1108static void
1109bnad_mem_free(struct bnad *bnad,
1110 struct bna_mem_info *mem_info)
1111{
1112 int i;
1113 dma_addr_t dma_pa;
1114
1115 if (mem_info->mdl == NULL)
1116 return;
1117
1118 for (i = 0; i < mem_info->num; i++) {
1119 if (mem_info->mdl[i].kva != NULL) {
1120 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1121 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
1122 dma_pa);
1123 dma_free_coherent(&bnad->pcidev->dev,
1124 mem_info->mdl[i].len,
1125 mem_info->mdl[i].kva, dma_pa);
1126 } else
1127 kfree(mem_info->mdl[i].kva);
1128 }
1129 }
1130 kfree(mem_info->mdl);
1131 mem_info->mdl = NULL;
1132}
1133
1134static int
1135bnad_mem_alloc(struct bnad *bnad,
1136 struct bna_mem_info *mem_info)
1137{
1138 int i;
1139 dma_addr_t dma_pa;
1140
1141 if ((mem_info->num == 0) || (mem_info->len == 0)) {
1142 mem_info->mdl = NULL;
1143 return 0;
1144 }
1145
1146 mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1147 GFP_KERNEL);
1148 if (mem_info->mdl == NULL)
1149 return -ENOMEM;
1150
1151 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1152 for (i = 0; i < mem_info->num; i++) {
1153 mem_info->mdl[i].len = mem_info->len;
1154 mem_info->mdl[i].kva =
1155 dma_alloc_coherent(&bnad->pcidev->dev,
1156 mem_info->len, &dma_pa,
1157 GFP_KERNEL);
1158
1159 if (mem_info->mdl[i].kva == NULL)
1160 goto err_return;
1161
1162 BNA_SET_DMA_ADDR(dma_pa,
1163 &(mem_info->mdl[i].dma));
1164 }
1165 } else {
1166 for (i = 0; i < mem_info->num; i++) {
1167 mem_info->mdl[i].len = mem_info->len;
1168 mem_info->mdl[i].kva = kzalloc(mem_info->len,
1169 GFP_KERNEL);
1170 if (mem_info->mdl[i].kva == NULL)
1171 goto err_return;
1172 }
1173 }
1174
1175 return 0;
1176
1177err_return:
1178 bnad_mem_free(bnad, mem_info);
1179 return -ENOMEM;
1180}
1181
1182/* Free IRQ for Mailbox */
1183static void
1184bnad_mbox_irq_free(struct bnad *bnad)
1185{
1186 int irq;
1187 unsigned long flags;
1188
1189 spin_lock_irqsave(&bnad->bna_lock, flags);
1190 bnad_disable_mbox_irq(bnad);
1191 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1192
1193 irq = BNAD_GET_MBOX_IRQ(bnad);
1194 free_irq(irq, bnad);
1195}
1196
1197/*
1198 * Allocates IRQ for Mailbox, but keep it disabled
1199 * This will be enabled once we get the mbox enable callback
1200 * from bna
1201 */
1202static int
1203bnad_mbox_irq_alloc(struct bnad *bnad)
1204{
1205 int err = 0;
1206 unsigned long irq_flags, flags;
1207 u32 irq;
1208 irq_handler_t irq_handler;
1209
1210 spin_lock_irqsave(&bnad->bna_lock, flags);
1211 if (bnad->cfg_flags & BNAD_CF_MSIX) {
1212 irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1213 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
1214 irq_flags = 0;
1215 } else {
1216 irq_handler = (irq_handler_t)bnad_isr;
1217 irq = bnad->pcidev->irq;
1218 irq_flags = IRQF_SHARED;
1219 }
1220
1221 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1222 sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1223
1224 /*
1225 * Set the Mbox IRQ disable flag, so that the IRQ handler
1226 * called from request_irq() for SHARED IRQs do not execute
1227 */
1228 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1229
1230 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1231
1232 err = request_irq(irq, irq_handler, irq_flags,
1233 bnad->mbox_irq_name, bnad);
1234
1235 return err;
1236}
1237
1238static void
1239bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1240{
1241 kfree(intr_info->idl);
1242 intr_info->idl = NULL;
1243}
1244
1245/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1246static int
1247bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1248 u32 txrx_id, struct bna_intr_info *intr_info)
1249{
1250 int i, vector_start = 0;
1251 u32 cfg_flags;
1252 unsigned long flags;
1253
1254 spin_lock_irqsave(&bnad->bna_lock, flags);
1255 cfg_flags = bnad->cfg_flags;
1256 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1257
1258 if (cfg_flags & BNAD_CF_MSIX) {
1259 intr_info->intr_type = BNA_INTR_T_MSIX;
1260 intr_info->idl = kcalloc(intr_info->num,
1261 sizeof(struct bna_intr_descr),
1262 GFP_KERNEL);
1263 if (!intr_info->idl)
1264 return -ENOMEM;
1265
1266 switch (src) {
1267 case BNAD_INTR_TX:
1268 vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
1269 break;
1270
1271 case BNAD_INTR_RX:
1272 vector_start = BNAD_MAILBOX_MSIX_VECTORS +
1273 (bnad->num_tx * bnad->num_txq_per_tx) +
1274 txrx_id;
1275 break;
1276
1277 default:
1278 BUG();
1279 }
1280
1281 for (i = 0; i < intr_info->num; i++)
1282 intr_info->idl[i].vector = vector_start + i;
1283 } else {
1284 intr_info->intr_type = BNA_INTR_T_INTX;
1285 intr_info->num = 1;
1286 intr_info->idl = kcalloc(intr_info->num,
1287 sizeof(struct bna_intr_descr),
1288 GFP_KERNEL);
1289 if (!intr_info->idl)
1290 return -ENOMEM;
1291
1292 switch (src) {
1293 case BNAD_INTR_TX:
1294 intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
1295 break;
1296
1297 case BNAD_INTR_RX:
1298 intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
1299 break;
1300 }
1301 }
1302 return 0;
1303}
1304
1305/**
1306 * NOTE: Should be called for MSIX only
1307 * Unregisters Tx MSIX vector(s) from the kernel
1308 */
1309static void
1310bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1311 int num_txqs)
1312{
1313 int i;
1314 int vector_num;
1315
1316 for (i = 0; i < num_txqs; i++) {
1317 if (tx_info->tcb[i] == NULL)
1318 continue;
1319
1320 vector_num = tx_info->tcb[i]->intr_vector;
1321 free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1322 }
1323}
1324
1325/**
1326 * NOTE: Should be called for MSIX only
1327 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1328 */
1329static int
1330bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1331 u32 tx_id, int num_txqs)
1332{
1333 int i;
1334 int err;
1335 int vector_num;
1336
1337 for (i = 0; i < num_txqs; i++) {
1338 vector_num = tx_info->tcb[i]->intr_vector;
1339 sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1340 tx_id + tx_info->tcb[i]->id);
1341 err = request_irq(bnad->msix_table[vector_num].vector,
1342 (irq_handler_t)bnad_msix_tx, 0,
1343 tx_info->tcb[i]->name,
1344 tx_info->tcb[i]);
1345 if (err)
1346 goto err_return;
1347 }
1348
1349 return 0;
1350
1351err_return:
1352 if (i > 0)
1353 bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
1354 return -1;
1355}
1356
1357/**
1358 * NOTE: Should be called for MSIX only
1359 * Unregisters Rx MSIX vector(s) from the kernel
1360 */
1361static void
1362bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1363 int num_rxps)
1364{
1365 int i;
1366 int vector_num;
1367
1368 for (i = 0; i < num_rxps; i++) {
1369 if (rx_info->rx_ctrl[i].ccb == NULL)
1370 continue;
1371
1372 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1373 free_irq(bnad->msix_table[vector_num].vector,
1374 rx_info->rx_ctrl[i].ccb);
1375 }
1376}
1377
1378/**
1379 * NOTE: Should be called for MSIX only
1380 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1381 */
1382static int
1383bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1384 u32 rx_id, int num_rxps)
1385{
1386 int i;
1387 int err;
1388 int vector_num;
1389
1390 for (i = 0; i < num_rxps; i++) {
1391 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1392 sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1393 bnad->netdev->name,
1394 rx_id + rx_info->rx_ctrl[i].ccb->id);
1395 err = request_irq(bnad->msix_table[vector_num].vector,
1396 (irq_handler_t)bnad_msix_rx, 0,
1397 rx_info->rx_ctrl[i].ccb->name,
1398 rx_info->rx_ctrl[i].ccb);
1399 if (err)
1400 goto err_return;
1401 }
1402
1403 return 0;
1404
1405err_return:
1406 if (i > 0)
1407 bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
1408 return -1;
1409}
1410
1411/* Free Tx object Resources */
1412static void
1413bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1414{
1415 int i;
1416
1417 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1418 if (res_info[i].res_type == BNA_RES_T_MEM)
1419 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1420 else if (res_info[i].res_type == BNA_RES_T_INTR)
1421 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1422 }
1423}
1424
1425/* Allocates memory and interrupt resources for Tx object */
1426static int
1427bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1428 u32 tx_id)
1429{
1430 int i, err = 0;
1431
1432 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1433 if (res_info[i].res_type == BNA_RES_T_MEM)
1434 err = bnad_mem_alloc(bnad,
1435 &res_info[i].res_u.mem_info);
1436 else if (res_info[i].res_type == BNA_RES_T_INTR)
1437 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1438 &res_info[i].res_u.intr_info);
1439 if (err)
1440 goto err_return;
1441 }
1442 return 0;
1443
1444err_return:
1445 bnad_tx_res_free(bnad, res_info);
1446 return err;
1447}
1448
1449/* Free Rx object Resources */
1450static void
1451bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1452{
1453 int i;
1454
1455 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1456 if (res_info[i].res_type == BNA_RES_T_MEM)
1457 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1458 else if (res_info[i].res_type == BNA_RES_T_INTR)
1459 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1460 }
1461}
1462
1463/* Allocates memory and interrupt resources for Rx object */
1464static int
1465bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1466 uint rx_id)
1467{
1468 int i, err = 0;
1469
1470 /* All memory needs to be allocated before setup_ccbs */
1471 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1472 if (res_info[i].res_type == BNA_RES_T_MEM)
1473 err = bnad_mem_alloc(bnad,
1474 &res_info[i].res_u.mem_info);
1475 else if (res_info[i].res_type == BNA_RES_T_INTR)
1476 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1477 &res_info[i].res_u.intr_info);
1478 if (err)
1479 goto err_return;
1480 }
1481 return 0;
1482
1483err_return:
1484 bnad_rx_res_free(bnad, res_info);
1485 return err;
1486}
1487
1488/* Timer callbacks */
1489/* a) IOC timer */
1490static void
1491bnad_ioc_timeout(unsigned long data)
1492{
1493 struct bnad *bnad = (struct bnad *)data;
1494 unsigned long flags;
1495
1496 spin_lock_irqsave(&bnad->bna_lock, flags);
1497 bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc);
1498 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1499}
1500
1501static void
1502bnad_ioc_hb_check(unsigned long data)
1503{
1504 struct bnad *bnad = (struct bnad *)data;
1505 unsigned long flags;
1506
1507 spin_lock_irqsave(&bnad->bna_lock, flags);
1508 bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc);
1509 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1510}
1511
1512static void
1513bnad_iocpf_timeout(unsigned long data)
1514{
1515 struct bnad *bnad = (struct bnad *)data;
1516 unsigned long flags;
1517
1518 spin_lock_irqsave(&bnad->bna_lock, flags);
1519 bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc);
1520 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1521}
1522
1523static void
1524bnad_iocpf_sem_timeout(unsigned long data)
1525{
1526 struct bnad *bnad = (struct bnad *)data;
1527 unsigned long flags;
1528
1529 spin_lock_irqsave(&bnad->bna_lock, flags);
1530 bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc);
1531 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1532}
1533
1534/*
1535 * All timer routines use bnad->bna_lock to protect against
1536 * the following race, which may occur in case of no locking:
1537 * Time CPU m CPU n
1538 * 0 1 = test_bit
1539 * 1 clear_bit
1540 * 2 del_timer_sync
1541 * 3 mod_timer
1542 */
1543
1544/* b) Dynamic Interrupt Moderation Timer */
1545static void
1546bnad_dim_timeout(unsigned long data)
1547{
1548 struct bnad *bnad = (struct bnad *)data;
1549 struct bnad_rx_info *rx_info;
1550 struct bnad_rx_ctrl *rx_ctrl;
1551 int i, j;
1552 unsigned long flags;
1553
1554 if (!netif_carrier_ok(bnad->netdev))
1555 return;
1556
1557 spin_lock_irqsave(&bnad->bna_lock, flags);
1558 for (i = 0; i < bnad->num_rx; i++) {
1559 rx_info = &bnad->rx_info[i];
1560 if (!rx_info->rx)
1561 continue;
1562 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1563 rx_ctrl = &rx_info->rx_ctrl[j];
1564 if (!rx_ctrl->ccb)
1565 continue;
1566 bna_rx_dim_update(rx_ctrl->ccb);
1567 }
1568 }
1569
1570 /* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
1571 if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1572 mod_timer(&bnad->dim_timer,
1573 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1574 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1575}
1576
1577/* c) Statistics Timer */
1578static void
1579bnad_stats_timeout(unsigned long data)
1580{
1581 struct bnad *bnad = (struct bnad *)data;
1582 unsigned long flags;
1583
1584 if (!netif_running(bnad->netdev) ||
1585 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1586 return;
1587
1588 spin_lock_irqsave(&bnad->bna_lock, flags);
1589 bna_hw_stats_get(&bnad->bna);
1590 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1591}
1592
1593/*
1594 * Set up timer for DIM
1595 * Called with bnad->bna_lock held
1596 */
1597void
1598bnad_dim_timer_start(struct bnad *bnad)
1599{
1600 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1601 !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1602 setup_timer(&bnad->dim_timer, bnad_dim_timeout,
1603 (unsigned long)bnad);
1604 set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1605 mod_timer(&bnad->dim_timer,
1606 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1607 }
1608}
1609
1610/*
1611 * Set up timer for statistics
1612 * Called with mutex_lock(&bnad->conf_mutex) held
1613 */
1614static void
1615bnad_stats_timer_start(struct bnad *bnad)
1616{
1617 unsigned long flags;
1618
1619 spin_lock_irqsave(&bnad->bna_lock, flags);
1620 if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1621 setup_timer(&bnad->stats_timer, bnad_stats_timeout,
1622 (unsigned long)bnad);
1623 mod_timer(&bnad->stats_timer,
1624 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1625 }
1626 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1627}
1628
1629/*
1630 * Stops the stats timer
1631 * Called with mutex_lock(&bnad->conf_mutex) held
1632 */
1633static void
1634bnad_stats_timer_stop(struct bnad *bnad)
1635{
1636 int to_del = 0;
1637 unsigned long flags;
1638
1639 spin_lock_irqsave(&bnad->bna_lock, flags);
1640 if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1641 to_del = 1;
1642 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1643 if (to_del)
1644 del_timer_sync(&bnad->stats_timer);
1645}
1646
1647/* Utilities */
1648
1649static void
1650bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1651{
1652 int i = 1; /* Index 0 has broadcast address */
1653 struct netdev_hw_addr *mc_addr;
1654
1655 netdev_for_each_mc_addr(mc_addr, netdev) {
1656 memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
1657 ETH_ALEN);
1658 i++;
1659 }
1660}
1661
1662static int
1663bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1664{
1665 struct bnad_rx_ctrl *rx_ctrl =
1666 container_of(napi, struct bnad_rx_ctrl, napi);
1667 struct bnad *bnad = rx_ctrl->bnad;
1668 int rcvd = 0;
1669
1670 rx_ctrl->rx_poll_ctr++;
1671
1672 if (!netif_carrier_ok(bnad->netdev))
1673 goto poll_exit;
1674
1675 rcvd = bnad_poll_cq(bnad, rx_ctrl->ccb, budget);
1676 if (rcvd >= budget)
1677 return rcvd;
1678
1679poll_exit:
1680 napi_complete(napi);
1681
1682 rx_ctrl->rx_complete++;
1683
1684 if (rx_ctrl->ccb)
1685 bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
1686
1687 return rcvd;
1688}
1689
1690#define BNAD_NAPI_POLL_QUOTA 64
1691static void
1692bnad_napi_add(struct bnad *bnad, u32 rx_id)
1693{
1694 struct bnad_rx_ctrl *rx_ctrl;
1695 int i;
1696
1697 /* Initialize & enable NAPI */
1698 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1699 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1700 netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1701 bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
1702 }
1703}
1704
1705static void
1706bnad_napi_delete(struct bnad *bnad, u32 rx_id)
1707{
1708 int i;
1709
1710 /* First disable and then clean up */
1711 for (i = 0; i < bnad->num_rxp_per_rx; i++)
1712 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1713}
1714
1715/* Should be held with conf_lock held */
1716void
1717bnad_cleanup_tx(struct bnad *bnad, u32 tx_id)
1718{
1719 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1720 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1721 unsigned long flags;
1722
1723 if (!tx_info->tx)
1724 return;
1725
1726 init_completion(&bnad->bnad_completions.tx_comp);
1727 spin_lock_irqsave(&bnad->bna_lock, flags);
1728 bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1729 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1730 wait_for_completion(&bnad->bnad_completions.tx_comp);
1731
1732 if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1733 bnad_tx_msix_unregister(bnad, tx_info,
1734 bnad->num_txq_per_tx);
1735
1736 spin_lock_irqsave(&bnad->bna_lock, flags);
1737 bna_tx_destroy(tx_info->tx);
1738 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1739
1740 tx_info->tx = NULL;
1741 tx_info->tx_id = 0;
1742
1743 bnad_tx_res_free(bnad, res_info);
1744}
1745
1746/* Should be held with conf_lock held */
1747int
1748bnad_setup_tx(struct bnad *bnad, u32 tx_id)
1749{
1750 int err;
1751 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1752 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1753 struct bna_intr_info *intr_info =
1754 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1755 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1756 static const struct bna_tx_event_cbfn tx_cbfn = {
1757 .tcb_setup_cbfn = bnad_cb_tcb_setup,
1758 .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
1759 .tx_stall_cbfn = bnad_cb_tx_stall,
1760 .tx_resume_cbfn = bnad_cb_tx_resume,
1761 .tx_cleanup_cbfn = bnad_cb_tx_cleanup,
1762 };
1763
1764 struct bna_tx *tx;
1765 unsigned long flags;
1766
1767 tx_info->tx_id = tx_id;
1768
1769 /* Initialize the Tx object configuration */
1770 tx_config->num_txq = bnad->num_txq_per_tx;
1771 tx_config->txq_depth = bnad->txq_depth;
1772 tx_config->tx_type = BNA_TX_T_REGULAR;
1773 tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
1774
1775 /* Get BNA's resource requirement for one tx object */
1776 spin_lock_irqsave(&bnad->bna_lock, flags);
1777 bna_tx_res_req(bnad->num_txq_per_tx,
1778 bnad->txq_depth, res_info);
1779 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1780
1781 /* Fill Unmap Q memory requirements */
1782 BNAD_FILL_UNMAPQ_MEM_REQ(
1783 &res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1784 bnad->num_txq_per_tx,
1785 BNAD_TX_UNMAPQ_DEPTH);
1786
1787 /* Allocate resources */
1788 err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1789 if (err)
1790 return err;
1791
1792 /* Ask BNA to create one Tx object, supplying required resources */
1793 spin_lock_irqsave(&bnad->bna_lock, flags);
1794 tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1795 tx_info);
1796 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1797 if (!tx)
1798 goto err_return;
1799 tx_info->tx = tx;
1800
1801 INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
1802 (work_func_t)bnad_tx_cleanup);
1803
1804 /* Register ISR for the Tx object */
1805 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1806 err = bnad_tx_msix_register(bnad, tx_info,
1807 tx_id, bnad->num_txq_per_tx);
1808 if (err)
1809 goto err_return;
1810 }
1811
1812 spin_lock_irqsave(&bnad->bna_lock, flags);
1813 bna_tx_enable(tx);
1814 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1815
1816 return 0;
1817
1818err_return:
1819 bnad_tx_res_free(bnad, res_info);
1820 return err;
1821}
1822
1823/* Setup the rx config for bna_rx_create */
1824/* bnad decides the configuration */
1825static void
1826bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
1827{
1828 rx_config->rx_type = BNA_RX_T_REGULAR;
1829 rx_config->num_paths = bnad->num_rxp_per_rx;
1830 rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
1831
1832 if (bnad->num_rxp_per_rx > 1) {
1833 rx_config->rss_status = BNA_STATUS_T_ENABLED;
1834 rx_config->rss_config.hash_type =
1835 (BFI_ENET_RSS_IPV6 |
1836 BFI_ENET_RSS_IPV6_TCP |
1837 BFI_ENET_RSS_IPV4 |
1838 BFI_ENET_RSS_IPV4_TCP);
1839 rx_config->rss_config.hash_mask =
1840 bnad->num_rxp_per_rx - 1;
1841 get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
1842 sizeof(rx_config->rss_config.toeplitz_hash_key));
1843 } else {
1844 rx_config->rss_status = BNA_STATUS_T_DISABLED;
1845 memset(&rx_config->rss_config, 0,
1846 sizeof(rx_config->rss_config));
1847 }
1848 rx_config->rxp_type = BNA_RXP_SLR;
1849 rx_config->q_depth = bnad->rxq_depth;
1850
1851 rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;
1852
1853 rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
1854}
1855
1856static void
1857bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
1858{
1859 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1860 int i;
1861
1862 for (i = 0; i < bnad->num_rxp_per_rx; i++)
1863 rx_info->rx_ctrl[i].bnad = bnad;
1864}
1865
1866/* Called with mutex_lock(&bnad->conf_mutex) held */
1867void
1868bnad_cleanup_rx(struct bnad *bnad, u32 rx_id)
1869{
1870 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1871 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1872 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1873 unsigned long flags;
1874 int to_del = 0;
1875
1876 if (!rx_info->rx)
1877 return;
1878
1879 if (0 == rx_id) {
1880 spin_lock_irqsave(&bnad->bna_lock, flags);
1881 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1882 test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1883 clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1884 to_del = 1;
1885 }
1886 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1887 if (to_del)
1888 del_timer_sync(&bnad->dim_timer);
1889 }
1890
1891 init_completion(&bnad->bnad_completions.rx_comp);
1892 spin_lock_irqsave(&bnad->bna_lock, flags);
1893 bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
1894 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1895 wait_for_completion(&bnad->bnad_completions.rx_comp);
1896
1897 if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
1898 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
1899
1900 bnad_napi_delete(bnad, rx_id);
1901
1902 spin_lock_irqsave(&bnad->bna_lock, flags);
1903 bna_rx_destroy(rx_info->rx);
1904
1905 rx_info->rx = NULL;
1906 rx_info->rx_id = 0;
1907 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1908
1909 bnad_rx_res_free(bnad, res_info);
1910}
1911
1912/* Called with mutex_lock(&bnad->conf_mutex) held */
1913int
078086f3 1914bnad_setup_rx(struct bnad *bnad, u32 rx_id)
8b230ed8
RM
1915{
1916 int err;
1917 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1918 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1919 struct bna_intr_info *intr_info =
1920 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
1921 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1922 static const struct bna_rx_event_cbfn rx_cbfn = {
1923 .rcb_setup_cbfn = bnad_cb_rcb_setup,
1924 .rcb_destroy_cbfn = NULL,
1925 .ccb_setup_cbfn = bnad_cb_ccb_setup,
1926 .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
1927 .rx_stall_cbfn = bnad_cb_rx_stall,
1928 .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
1929 .rx_post_cbfn = bnad_cb_rx_post,
1930 };
1931 struct bna_rx *rx;
1932 unsigned long flags;
1933
078086f3
RM
1934 rx_info->rx_id = rx_id;
1935
8b230ed8
RM
1936 /* Initialize the Rx object configuration */
1937 bnad_init_rx_config(bnad, rx_config);
1938
8b230ed8
RM
1939 /* Get BNA's resource requirement for one Rx object */
1940 spin_lock_irqsave(&bnad->bna_lock, flags);
1941 bna_rx_res_req(rx_config, res_info);
1942 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1943
1944 /* Fill Unmap Q memory requirements */
1945 BNAD_FILL_UNMAPQ_MEM_REQ(
1946 &res_info[BNA_RX_RES_MEM_T_UNMAPQ],
1947 rx_config->num_paths +
1948 ((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 :
1949 rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH);
1950
1951 /* Allocate resource */
1952 err = bnad_rx_res_alloc(bnad, res_info, rx_id);
1953 if (err)
1954 return err;
1955
2be67144
RM
1956 bnad_rx_ctrl_init(bnad, rx_id);
1957
8b230ed8
RM
1958 /* Ask BNA to create one Rx object, supplying required resources */
1959 spin_lock_irqsave(&bnad->bna_lock, flags);
1960 rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
1961 rx_info);
3caa1e95
RM
1962 if (!rx) {
1963 err = -ENOMEM;
b9fa1fbf 1964 spin_unlock_irqrestore(&bnad->bna_lock, flags);
8b230ed8 1965 goto err_return;
3caa1e95 1966 }
8b230ed8 1967 rx_info->rx = rx;
b9fa1fbf 1968 spin_unlock_irqrestore(&bnad->bna_lock, flags);
8b230ed8 1969
01b54b14
JH
1970 INIT_WORK(&rx_info->rx_cleanup_work,
1971 (work_func_t)(bnad_rx_cleanup));
1972
2be67144
RM
1973 /*
1974 	 * Init NAPI, so that state is set to NAPI_STATE_SCHED and
1975 	 * the IRQ handler cannot schedule NAPI at this point.
1976 */
01b54b14 1977 bnad_napi_add(bnad, rx_id);
2be67144 1978
8b230ed8
RM
1979 /* Register ISR for the Rx object */
1980 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1981 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
1982 rx_config->num_paths);
1983 if (err)
1984 goto err_return;
1985 }
1986
8b230ed8
RM
1987 spin_lock_irqsave(&bnad->bna_lock, flags);
1988 if (0 == rx_id) {
1989 /* Set up Dynamic Interrupt Moderation Vector */
1990 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
1991 bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
1992
1993 /* Enable VLAN filtering only on the default Rx */
1994 bna_rx_vlanfilter_enable(rx);
1995
1996 /* Start the DIM timer */
1997 bnad_dim_timer_start(bnad);
1998 }
1999
2000 bna_rx_enable(rx);
2001 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2002
2003 return 0;
2004
2005err_return:
2006 bnad_cleanup_rx(bnad, rx_id);
2007 return err;
2008}
2009
2010/* Called with conf_lock & bnad->bna_lock held */
2011void
2012bnad_tx_coalescing_timeo_set(struct bnad *bnad)
2013{
2014 struct bnad_tx_info *tx_info;
2015
2016 tx_info = &bnad->tx_info[0];
2017 if (!tx_info->tx)
2018 return;
2019
2020 bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
2021}
2022
2023/* Called with conf_lock & bnad->bna_lock held */
2024void
2025bnad_rx_coalescing_timeo_set(struct bnad *bnad)
2026{
2027 struct bnad_rx_info *rx_info;
0120b99c 2028 int i;
8b230ed8
RM
2029
2030 for (i = 0; i < bnad->num_rx; i++) {
2031 rx_info = &bnad->rx_info[i];
2032 if (!rx_info->rx)
2033 continue;
2034 bna_rx_coalescing_timeo_set(rx_info->rx,
2035 bnad->rx_coalescing_timeo);
2036 }
2037}
2038
2039/*
2040 * Called with bnad->bna_lock held
2041 */
a2122d95 2042int
8b230ed8
RM
2043bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
2044{
2045 int ret;
2046
2047 if (!is_valid_ether_addr(mac_addr))
2048 return -EADDRNOTAVAIL;
2049
2050 /* If datapath is down, pretend everything went through */
2051 if (!bnad->rx_info[0].rx)
2052 return 0;
2053
2054 ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
2055 if (ret != BNA_CB_SUCCESS)
2056 return -EADDRNOTAVAIL;
2057
2058 return 0;
2059}
2060
2061/* Should be called with conf_lock held */
a2122d95 2062int
8b230ed8
RM
2063bnad_enable_default_bcast(struct bnad *bnad)
2064{
2065 struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2066 int ret;
2067 unsigned long flags;
2068
2069 init_completion(&bnad->bnad_completions.mcast_comp);
2070
2071 spin_lock_irqsave(&bnad->bna_lock, flags);
2072 ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
2073 bnad_cb_rx_mcast_add);
2074 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2075
2076 if (ret == BNA_CB_SUCCESS)
2077 wait_for_completion(&bnad->bnad_completions.mcast_comp);
2078 else
2079 return -ENODEV;
2080
2081 if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2082 return -ENODEV;
2083
2084 return 0;
2085}
2086
19dbff9f 2087/* Called with mutex_lock(&bnad->conf_mutex) held */
a2122d95 2088void
aad75b66
RM
2089bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2090{
f859d7cb 2091 u16 vid;
aad75b66
RM
2092 unsigned long flags;
2093
f859d7cb 2094 for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
aad75b66 2095 spin_lock_irqsave(&bnad->bna_lock, flags);
f859d7cb 2096 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
aad75b66
RM
2097 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2098 }
2099}
2100
8b230ed8
RM
2101/* Statistics utilities */
2102void
250e061e 2103bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
8b230ed8 2104{
8b230ed8
RM
2105 int i, j;
2106
2107 for (i = 0; i < bnad->num_rx; i++) {
2108 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2109 if (bnad->rx_info[i].rx_ctrl[j].ccb) {
250e061e 2110 stats->rx_packets += bnad->rx_info[i].
8b230ed8 2111 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
250e061e 2112 stats->rx_bytes += bnad->rx_info[i].
8b230ed8
RM
2113 rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2114 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2115 bnad->rx_info[i].rx_ctrl[j].ccb->
2116 rcb[1]->rxq) {
250e061e 2117 stats->rx_packets +=
8b230ed8
RM
2118 bnad->rx_info[i].rx_ctrl[j].
2119 ccb->rcb[1]->rxq->rx_packets;
250e061e 2120 stats->rx_bytes +=
8b230ed8
RM
2121 bnad->rx_info[i].rx_ctrl[j].
2122 ccb->rcb[1]->rxq->rx_bytes;
2123 }
2124 }
2125 }
2126 }
2127 for (i = 0; i < bnad->num_tx; i++) {
2128 for (j = 0; j < bnad->num_txq_per_tx; j++) {
2129 if (bnad->tx_info[i].tcb[j]) {
250e061e 2130 stats->tx_packets +=
8b230ed8 2131 bnad->tx_info[i].tcb[j]->txq->tx_packets;
250e061e 2132 stats->tx_bytes +=
8b230ed8
RM
2133 bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2134 }
2135 }
2136 }
2137}
2138
2139/*
2140 * Must be called with the bna_lock held.
2141 */
2142void
250e061e 2143bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
8b230ed8 2144{
078086f3
RM
2145 struct bfi_enet_stats_mac *mac_stats;
2146 u32 bmap;
8b230ed8
RM
2147 int i;
2148
078086f3 2149 mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
250e061e 2150 stats->rx_errors =
8b230ed8
RM
2151 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2152 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2153 mac_stats->rx_undersize;
250e061e 2154 stats->tx_errors = mac_stats->tx_fcs_error +
8b230ed8 2155 mac_stats->tx_undersize;
250e061e
ED
2156 stats->rx_dropped = mac_stats->rx_drop;
2157 stats->tx_dropped = mac_stats->tx_drop;
2158 stats->multicast = mac_stats->rx_multicast;
2159 stats->collisions = mac_stats->tx_total_collision;
8b230ed8 2160
250e061e 2161 stats->rx_length_errors = mac_stats->rx_frame_length_error;
8b230ed8
RM
2162
2163 /* receive ring buffer overflow ?? */
2164
250e061e
ED
2165 stats->rx_crc_errors = mac_stats->rx_fcs_error;
2166 stats->rx_frame_errors = mac_stats->rx_alignment_error;
8b230ed8 2167 /* recv'r fifo overrun */
078086f3
RM
2168 bmap = bna_rx_rid_mask(&bnad->bna);
2169 for (i = 0; bmap; i++) {
8b230ed8 2170 if (bmap & 1) {
250e061e 2171 stats->rx_fifo_errors +=
8b230ed8 2172 bnad->stats.bna_stats->
078086f3 2173 hw_stats.rxf_stats[i].frame_drops;
8b230ed8
RM
2174 break;
2175 }
2176 bmap >>= 1;
2177 }
2178}
2179
2180static void
2181bnad_mbox_irq_sync(struct bnad *bnad)
2182{
2183 u32 irq;
2184 unsigned long flags;
2185
2186 spin_lock_irqsave(&bnad->bna_lock, flags);
2187 if (bnad->cfg_flags & BNAD_CF_MSIX)
8811e267 2188 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
8b230ed8
RM
2189 else
2190 irq = bnad->pcidev->irq;
2191 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2192
2193 synchronize_irq(irq);
2194}
2195
2196/* Utility used by bnad_start_xmit, for doing TSO */
2197static int
2198bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2199{
2200 int err;
2201
8b230ed8
RM
2202 if (skb_header_cloned(skb)) {
2203 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2204 if (err) {
2205 BNAD_UPDATE_CTR(bnad, tso_err);
2206 return err;
2207 }
2208 }
2209
2210 /*
2211 	 * For TSO, the TCP checksum field is seeded with the pseudo-header
2212 	 * sum, excluding the length field.
2213 */
2214 if (skb->protocol == htons(ETH_P_IP)) {
2215 struct iphdr *iph = ip_hdr(skb);
2216
2217 /* Do we really need these? */
2218 iph->tot_len = 0;
2219 iph->check = 0;
2220
2221 tcp_hdr(skb)->check =
2222 ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2223 IPPROTO_TCP, 0);
2224 BNAD_UPDATE_CTR(bnad, tso4);
2225 } else {
2226 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2227
8b230ed8
RM
2228 ipv6h->payload_len = 0;
2229 tcp_hdr(skb)->check =
2230 ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2231 IPPROTO_TCP, 0);
2232 BNAD_UPDATE_CTR(bnad, tso6);
2233 }
2234
2235 return 0;
2236}
2237
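/*
 * Standalone sketch (illustrative names, not driver code) of the seeding
 * performed by bnad_tso_prepare() above: tcph->check is pre-loaded with
 * the 16-bit one's-complement sum of the pseudo-header with the length
 * omitted, so the hardware can add the per-segment length and payload for
 * every generated segment and then complete the checksum.  Addresses are
 * taken as plain 32-bit values here for brevity.
 */
#include <stdint.h>

static uint16_t pseudo_hdr_seed_ipv4(uint32_t saddr, uint32_t daddr, uint8_t proto)
{
	uint32_t sum;

	sum  = (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += proto;				/* length intentionally omitted */

	while (sum >> 16)			/* fold carries back into 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);

	return (uint16_t)sum;			/* value seeded into tcph->check */
}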
2238/*
2239 * Initialize Q numbers depending on Rx Paths
2240 * Called with bnad->bna_lock held, because of cfg_flags
2241 * access.
2242 */
2243static void
2244bnad_q_num_init(struct bnad *bnad)
2245{
2246 int rxps;
2247
2248 rxps = min((uint)num_online_cpus(),
772b5235 2249 (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
8b230ed8
RM
2250
2251 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2252 rxps = 1; /* INTx */
2253
2254 bnad->num_rx = 1;
2255 bnad->num_tx = 1;
2256 bnad->num_rxp_per_rx = rxps;
2257 bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2258}
2259
2260/*
2261 * Adjusts the queue numbers, given the number of MSI-X vectors.
2262 * Gives preference to RSS over Tx priority queues; in that case
2263 * only one Tx queue is used.
2264 * Called with bnad->bna_lock held because of cfg_flags access.
2265 */
2266static void
078086f3 2267bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
8b230ed8
RM
2268{
2269 bnad->num_txq_per_tx = 1;
2270 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
2271 bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2272 (bnad->cfg_flags & BNAD_CF_MSIX)) {
2273 bnad->num_rxp_per_rx = msix_vectors -
2274 (bnad->num_tx * bnad->num_txq_per_tx) -
2275 BNAD_MAILBOX_MSIX_VECTORS;
2276 } else
2277 bnad->num_rxp_per_rx = 1;
2278}
2279
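/*
 * Worked example (illustrative only) of the split performed by
 * bnad_q_num_adjust() above: one TxQ is kept, the mailbox vector is set
 * aside, and the remaining vectors drive the Rx path count.  With 8
 * vectors available, 1 TxQ and 1 mailbox vector, 6 Rx paths remain;
 * if the budget is too small, a single Rx path is used.
 */
static int rxp_from_vectors(int msix_vectors, int num_txq, int mbox_vectors)
{
	int rxp = msix_vectors - num_txq - mbox_vectors;

	return rxp >= 1 ? rxp : 1;	/* never fewer than one Rx path */
}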
078086f3
RM
2280/* Enable / disable ioceth */
2281static int
2282bnad_ioceth_disable(struct bnad *bnad)
8b230ed8
RM
2283{
2284 unsigned long flags;
078086f3 2285 int err = 0;
8b230ed8
RM
2286
2287 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3
RM
2288 init_completion(&bnad->bnad_completions.ioc_comp);
2289 bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
8b230ed8
RM
2290 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2291
078086f3
RM
2292 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2293 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2294
2295 err = bnad->bnad_completions.ioc_comp_status;
2296 return err;
8b230ed8
RM
2297}
2298
2299static int
078086f3 2300bnad_ioceth_enable(struct bnad *bnad)
8b230ed8
RM
2301{
2302 int err = 0;
2303 unsigned long flags;
2304
8b230ed8 2305 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3
RM
2306 init_completion(&bnad->bnad_completions.ioc_comp);
2307 bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2308 bna_ioceth_enable(&bnad->bna.ioceth);
8b230ed8
RM
2309 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2310
078086f3
RM
2311 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2312 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
8b230ed8 2313
078086f3 2314 err = bnad->bnad_completions.ioc_comp_status;
8b230ed8
RM
2315
2316 return err;
2317}
2318
2319/* Free BNA resources */
2320static void
078086f3
RM
2321bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2322 u32 res_val_max)
8b230ed8
RM
2323{
2324 int i;
8b230ed8 2325
078086f3
RM
2326 for (i = 0; i < res_val_max; i++)
2327 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
8b230ed8
RM
2328}
2329
2330/* Allocates memory and interrupt resources for BNA */
2331static int
078086f3
RM
2332bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2333 u32 res_val_max)
8b230ed8
RM
2334{
2335 int i, err;
8b230ed8 2336
078086f3
RM
2337 for (i = 0; i < res_val_max; i++) {
2338 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
8b230ed8
RM
2339 if (err)
2340 goto err_return;
2341 }
2342 return 0;
2343
2344err_return:
078086f3 2345 bnad_res_free(bnad, res_info, res_val_max);
8b230ed8
RM
2346 return err;
2347}
2348
2349/* Interrupt enable / disable */
2350static void
2351bnad_enable_msix(struct bnad *bnad)
2352{
2353 int i, ret;
8b230ed8
RM
2354 unsigned long flags;
2355
2356 spin_lock_irqsave(&bnad->bna_lock, flags);
2357 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2358 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2359 return;
2360 }
2361 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2362
2363 if (bnad->msix_table)
2364 return;
2365
8b230ed8 2366 bnad->msix_table =
b7ee31c5 2367 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
8b230ed8
RM
2368
2369 if (!bnad->msix_table)
2370 goto intx_mode;
2371
b7ee31c5 2372 for (i = 0; i < bnad->msix_num; i++)
8b230ed8
RM
2373 bnad->msix_table[i].entry = i;
2374
b7ee31c5 2375 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
8b230ed8
RM
2376 if (ret > 0) {
2377 /* Not enough MSI-X vectors. */
19dbff9f
RM
2378 pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
2379 ret, bnad->msix_num);
8b230ed8
RM
2380
2381 spin_lock_irqsave(&bnad->bna_lock, flags);
2382 /* ret = #of vectors that we got */
271e8b79
RM
2383 bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2384 (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
8b230ed8
RM
2385 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2386
271e8b79 2387 bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
8b230ed8 2388 BNAD_MAILBOX_MSIX_VECTORS;
8b230ed8 2389
078086f3
RM
2390 if (bnad->msix_num > ret)
2391 goto intx_mode;
2392
8b230ed8
RM
2393 /* Try once more with adjusted numbers */
2394 /* If this fails, fall back to INTx */
2395 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
b7ee31c5 2396 bnad->msix_num);
8b230ed8
RM
2397 if (ret)
2398 goto intx_mode;
2399
2400 } else if (ret < 0)
2401 goto intx_mode;
078086f3
RM
2402
2403 pci_intx(bnad->pcidev, 0);
2404
8b230ed8
RM
2405 return;
2406
2407intx_mode:
19dbff9f 2408 pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n");
8b230ed8
RM
2409
2410 kfree(bnad->msix_table);
2411 bnad->msix_table = NULL;
2412 bnad->msix_num = 0;
8b230ed8
RM
2413 spin_lock_irqsave(&bnad->bna_lock, flags);
2414 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2415 bnad_q_num_init(bnad);
2416 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2417}
2418
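/*
 * Standalone sketch (illustrative, not driver API) of the vector budget
 * used by bnad_enable_msix() above: one vector per TxQ, one per Rx path
 * and one for the mailbox.  When pci_enable_msix() grants fewer vectors,
 * the queue counts are shrunk and the smaller budget is retried; if even
 * that cannot be met, the driver falls back to INTx.
 */
static int msix_vectors_needed(int num_txq, int num_rxp, int mbox_vectors)
{
	return num_txq + num_rxp + mbox_vectors;
}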
2419static void
2420bnad_disable_msix(struct bnad *bnad)
2421{
2422 u32 cfg_flags;
2423 unsigned long flags;
2424
2425 spin_lock_irqsave(&bnad->bna_lock, flags);
2426 cfg_flags = bnad->cfg_flags;
2427 if (bnad->cfg_flags & BNAD_CF_MSIX)
2428 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2429 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2430
2431 if (cfg_flags & BNAD_CF_MSIX) {
2432 pci_disable_msix(bnad->pcidev);
2433 kfree(bnad->msix_table);
2434 bnad->msix_table = NULL;
2435 }
2436}
2437
2438/* Netdev entry points */
2439static int
2440bnad_open(struct net_device *netdev)
2441{
2442 int err;
2443 struct bnad *bnad = netdev_priv(netdev);
2444 struct bna_pause_config pause_config;
2445 int mtu;
2446 unsigned long flags;
2447
2448 mutex_lock(&bnad->conf_mutex);
2449
2450 /* Tx */
2451 err = bnad_setup_tx(bnad, 0);
2452 if (err)
2453 goto err_return;
2454
2455 /* Rx */
2456 err = bnad_setup_rx(bnad, 0);
2457 if (err)
2458 goto cleanup_tx;
2459
2460 /* Port */
2461 pause_config.tx_pause = 0;
2462 pause_config.rx_pause = 0;
2463
078086f3 2464 mtu = ETH_HLEN + VLAN_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
8b230ed8
RM
2465
2466 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3
RM
2467 bna_enet_mtu_set(&bnad->bna.enet, mtu, NULL);
2468 bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
2469 bna_enet_enable(&bnad->bna.enet);
8b230ed8
RM
2470 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2471
2472 /* Enable broadcast */
2473 bnad_enable_default_bcast(bnad);
2474
aad75b66
RM
2475 /* Restore VLANs, if any */
2476 bnad_restore_vlans(bnad, 0);
2477
8b230ed8
RM
2478 /* Set the UCAST address */
2479 spin_lock_irqsave(&bnad->bna_lock, flags);
2480 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2481 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2482
2483 /* Start the stats timer */
2484 bnad_stats_timer_start(bnad);
2485
2486 mutex_unlock(&bnad->conf_mutex);
2487
2488 return 0;
2489
2490cleanup_tx:
2491 bnad_cleanup_tx(bnad, 0);
2492
2493err_return:
2494 mutex_unlock(&bnad->conf_mutex);
2495 return err;
2496}
2497
2498static int
2499bnad_stop(struct net_device *netdev)
2500{
2501 struct bnad *bnad = netdev_priv(netdev);
2502 unsigned long flags;
2503
2504 mutex_lock(&bnad->conf_mutex);
2505
2506 /* Stop the stats timer */
2507 bnad_stats_timer_stop(bnad);
2508
078086f3 2509 init_completion(&bnad->bnad_completions.enet_comp);
8b230ed8
RM
2510
2511 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3
RM
2512 bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2513 bnad_cb_enet_disabled);
8b230ed8
RM
2514 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2515
078086f3 2516 wait_for_completion(&bnad->bnad_completions.enet_comp);
8b230ed8
RM
2517
2518 bnad_cleanup_tx(bnad, 0);
2519 bnad_cleanup_rx(bnad, 0);
2520
2521 /* Synchronize mailbox IRQ */
2522 bnad_mbox_irq_sync(bnad);
2523
2524 mutex_unlock(&bnad->conf_mutex);
2525
2526 return 0;
2527}
2528
2529/* TX */
2530/*
2531 * bnad_start_xmit : Netdev entry point for Transmit
2532 *		     Called with the netdev Tx lock held by the stack
2533 */
2534static netdev_tx_t
2535bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2536{
2537 struct bnad *bnad = netdev_priv(netdev);
078086f3
RM
2538 u32 txq_id = 0;
2539 struct bna_tcb *tcb = bnad->tx_info[0].tcb[txq_id];
8b230ed8 2540
0120b99c
RM
2541 u16 txq_prod, vlan_tag = 0;
2542 u32 unmap_prod, wis, wis_used, wi_range;
2543 u32 vectors, vect_id, i, acked;
0120b99c 2544 int err;
271e8b79
RM
2545 unsigned int len;
2546 u32 gso_size;
8b230ed8 2547
078086f3 2548 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
0120b99c 2549 dma_addr_t dma_addr;
8b230ed8 2550 struct bna_txq_entry *txqent;
078086f3 2551 u16 flags;
8b230ed8 2552
271e8b79
RM
2553 if (unlikely(skb->len <= ETH_HLEN)) {
2554 dev_kfree_skb(skb);
2555 BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2556 return NETDEV_TX_OK;
2557 }
2558 if (unlikely(skb_headlen(skb) > BFI_TX_MAX_DATA_PER_VECTOR)) {
8b230ed8 2559 dev_kfree_skb(skb);
271e8b79
RM
2560 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_too_long);
2561 return NETDEV_TX_OK;
2562 }
2563 if (unlikely(skb_headlen(skb) == 0)) {
2564 dev_kfree_skb(skb);
2565 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
8b230ed8
RM
2566 return NETDEV_TX_OK;
2567 }
2568
2569 /*
2570 * Takes care of the Tx that is scheduled between clearing the flag
19dbff9f 2571 * and the netif_tx_stop_all_queues() call.
8b230ed8 2572 */
be7fa326 2573 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
8b230ed8 2574 dev_kfree_skb(skb);
271e8b79 2575 BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
8b230ed8
RM
2576 return NETDEV_TX_OK;
2577 }
2578
8b230ed8 2579 vectors = 1 + skb_shinfo(skb)->nr_frags;
271e8b79 2580 if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
8b230ed8 2581 dev_kfree_skb(skb);
271e8b79 2582 BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
8b230ed8
RM
2583 return NETDEV_TX_OK;
2584 }
2585 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
2586 acked = 0;
078086f3
RM
2587 if (unlikely(wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2588 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
8b230ed8
RM
2589 if ((u16) (*tcb->hw_consumer_index) !=
2590 tcb->consumer_index &&
2591 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2592 acked = bnad_free_txbufs(bnad, tcb);
be7fa326
RM
2593 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2594 bna_ib_ack(tcb->i_dbell, acked);
8b230ed8
RM
2595 smp_mb__before_clear_bit();
2596 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2597 } else {
2598 netif_stop_queue(netdev);
2599 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2600 }
2601
2602 smp_mb();
2603 /*
2604 * Check again to deal with race condition between
2605 * netif_stop_queue here, and netif_wake_queue in
2606 * interrupt handler which is not inside netif tx lock.
2607 */
2608 if (likely
2609 (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2610 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2611 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2612 return NETDEV_TX_BUSY;
2613 } else {
2614 netif_wake_queue(netdev);
2615 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2616 }
2617 }
2618
2619 unmap_prod = unmap_q->producer_index;
8b230ed8
RM
2620 flags = 0;
2621
2622 txq_prod = tcb->producer_index;
2623 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
8b230ed8
RM
2624 txqent->hdr.wi.reserved = 0;
2625 txqent->hdr.wi.num_vectors = vectors;
8b230ed8 2626
eab6d18d 2627 if (vlan_tx_tag_present(skb)) {
8b230ed8
RM
2628 vlan_tag = (u16) vlan_tx_tag_get(skb);
2629 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2630 }
2631 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2632 vlan_tag =
2633 (tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff);
2634 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2635 }
2636
2637 txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2638
2639 if (skb_is_gso(skb)) {
271e8b79
RM
2640 gso_size = skb_shinfo(skb)->gso_size;
2641
2642 if (unlikely(gso_size > netdev->mtu)) {
2643 dev_kfree_skb(skb);
2644 BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
2645 return NETDEV_TX_OK;
2646 }
2647 if (unlikely((gso_size + skb_transport_offset(skb) +
2648 tcp_hdrlen(skb)) >= skb->len)) {
2649 txqent->hdr.wi.opcode =
2650 __constant_htons(BNA_TXQ_WI_SEND);
2651 txqent->hdr.wi.lso_mss = 0;
2652 BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2653 } else {
2654 txqent->hdr.wi.opcode =
2655 __constant_htons(BNA_TXQ_WI_SEND_LSO);
2656 txqent->hdr.wi.lso_mss = htons(gso_size);
2657 }
2658
8b230ed8 2659 err = bnad_tso_prepare(bnad, skb);
271e8b79 2660 if (unlikely(err)) {
8b230ed8 2661 dev_kfree_skb(skb);
271e8b79 2662 BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
8b230ed8
RM
2663 return NETDEV_TX_OK;
2664 }
8b230ed8
RM
2665 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2666 txqent->hdr.wi.l4_hdr_size_n_offset =
2667 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2668 (tcp_hdrlen(skb) >> 2,
2669 skb_transport_offset(skb)));
271e8b79
RM
2670 } else {
2671 txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND);
8b230ed8
RM
2672 txqent->hdr.wi.lso_mss = 0;
2673
271e8b79
RM
2674 if (unlikely(skb->len > (netdev->mtu + ETH_HLEN))) {
2675 dev_kfree_skb(skb);
2676 BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
2677 return NETDEV_TX_OK;
8b230ed8 2678 }
8b230ed8 2679
271e8b79
RM
2680 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2681 u8 proto = 0;
8b230ed8 2682
271e8b79
RM
2683 if (skb->protocol == __constant_htons(ETH_P_IP))
2684 proto = ip_hdr(skb)->protocol;
2685 else if (skb->protocol ==
2686 __constant_htons(ETH_P_IPV6)) {
2687 			/* nexthdr may not be TCP when extension headers are present. */
2688 proto = ipv6_hdr(skb)->nexthdr;
2689 }
2690 if (proto == IPPROTO_TCP) {
2691 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2692 txqent->hdr.wi.l4_hdr_size_n_offset =
2693 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2694 (0, skb_transport_offset(skb)));
2695
2696 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2697
2698 if (unlikely(skb_headlen(skb) <
2699 skb_transport_offset(skb) + tcp_hdrlen(skb))) {
2700 dev_kfree_skb(skb);
2701 BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
2702 return NETDEV_TX_OK;
2703 }
8b230ed8 2704
271e8b79
RM
2705 } else if (proto == IPPROTO_UDP) {
2706 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2707 txqent->hdr.wi.l4_hdr_size_n_offset =
2708 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2709 (0, skb_transport_offset(skb)));
2710
2711 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2712 if (unlikely(skb_headlen(skb) <
2713 skb_transport_offset(skb) +
2714 sizeof(struct udphdr))) {
2715 dev_kfree_skb(skb);
2716 BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
2717 return NETDEV_TX_OK;
2718 }
2719 } else {
8b230ed8 2720 dev_kfree_skb(skb);
271e8b79 2721 BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
8b230ed8
RM
2722 return NETDEV_TX_OK;
2723 }
271e8b79
RM
2724 } else {
2725 txqent->hdr.wi.l4_hdr_size_n_offset = 0;
8b230ed8 2726 }
8b230ed8
RM
2727 }
2728
2729 txqent->hdr.wi.flags = htons(flags);
2730
2731 txqent->hdr.wi.frame_length = htonl(skb->len);
2732
2733 unmap_q->unmap_array[unmap_prod].skb = skb;
271e8b79
RM
2734 len = skb_headlen(skb);
2735 txqent->vector[0].length = htons(len);
5ea74318
IV
2736 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
2737 skb_headlen(skb), DMA_TO_DEVICE);
2738 dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
8b230ed8
RM
2739 dma_addr);
2740
271e8b79 2741 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
8b230ed8
RM
2742 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2743
271e8b79
RM
2744 vect_id = 0;
2745 wis_used = 1;
2746
8b230ed8 2747 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9e903e08
ED
2748 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
2749 u16 size = skb_frag_size(frag);
8b230ed8 2750
271e8b79
RM
2751 if (unlikely(size == 0)) {
2752 unmap_prod = unmap_q->producer_index;
2753
2754 unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
2755 unmap_q->unmap_array,
2756 unmap_prod, unmap_q->q_depth, skb,
2757 i);
2758 dev_kfree_skb(skb);
2759 BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
2760 return NETDEV_TX_OK;
2761 }
2762
2763 len += size;
2764
8b230ed8
RM
2765 if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
2766 vect_id = 0;
2767 if (--wi_range)
2768 txqent++;
2769 else {
2770 BNA_QE_INDX_ADD(txq_prod, wis_used,
2771 tcb->q_depth);
2772 wis_used = 0;
2773 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
2774 txqent, wi_range);
8b230ed8
RM
2775 }
2776 wis_used++;
271e8b79
RM
2777 txqent->hdr.wi_ext.opcode =
2778 __constant_htons(BNA_TXQ_WI_EXTENSION);
8b230ed8
RM
2779 }
2780
2781 BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
2782 txqent->vector[vect_id].length = htons(size);
4d5b1a67
IC
2783 dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
2784 0, size, DMA_TO_DEVICE);
5ea74318 2785 dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
8b230ed8
RM
2786 dma_addr);
2787 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2788 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2789 }
2790
271e8b79
RM
2791 if (unlikely(len != skb->len)) {
2792 unmap_prod = unmap_q->producer_index;
2793
2794 unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
2795 unmap_q->unmap_array, unmap_prod,
2796 unmap_q->q_depth, skb,
2797 skb_shinfo(skb)->nr_frags);
2798 dev_kfree_skb(skb);
2799 BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
2800 return NETDEV_TX_OK;
2801 }
2802
8b230ed8
RM
2803 unmap_q->producer_index = unmap_prod;
2804 BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
2805 tcb->producer_index = txq_prod;
2806
2807 smp_mb();
be7fa326
RM
2808
2809 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2810 return NETDEV_TX_OK;
2811
8b230ed8 2812 bna_txq_prod_indx_doorbell(tcb);
271e8b79 2813 smp_mb();
8b230ed8 2814
8b230ed8
RM
2815 return NETDEV_TX_OK;
2816}
2817
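/*
 * Sketch of the ring-space check at the top of bnad_start_xmit() above
 * (illustrative helpers, not driver API): a frame needs one vector for
 * the linear part plus one per page fragment, packed four vectors per
 * work item, and transmission is deferred when either the TxQ or the
 * unmap queue cannot hold it.
 */
static int tx_frame_fits(unsigned int nr_frags,
			 unsigned int free_txq_wis,
			 unsigned int free_unmap_entries)
{
	unsigned int vectors = 1 + nr_frags;	/* headlen + fragments */
	unsigned int wis = (vectors + 3) / 4;	/* 4 vectors per work item */

	return wis <= free_txq_wis && vectors <= free_unmap_entries;
}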
2818/*
2819 * Uses the spin lock to synchronize reading of the stats structures, which
2820 * are written by BNA under the same lock.
2821 */
250e061e
ED
2822static struct rtnl_link_stats64 *
2823bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
8b230ed8
RM
2824{
2825 struct bnad *bnad = netdev_priv(netdev);
2826 unsigned long flags;
2827
2828 spin_lock_irqsave(&bnad->bna_lock, flags);
2829
250e061e
ED
2830 bnad_netdev_qstats_fill(bnad, stats);
2831 bnad_netdev_hwstats_fill(bnad, stats);
8b230ed8
RM
2832
2833 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2834
250e061e 2835 return stats;
8b230ed8
RM
2836}
2837
a2122d95 2838void
8b230ed8
RM
2839bnad_set_rx_mode(struct net_device *netdev)
2840{
2841 struct bnad *bnad = netdev_priv(netdev);
2842 u32 new_mask, valid_mask;
2843 unsigned long flags;
2844
2845 spin_lock_irqsave(&bnad->bna_lock, flags);
2846
2847 new_mask = valid_mask = 0;
2848
2849 if (netdev->flags & IFF_PROMISC) {
2850 if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
2851 new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2852 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2853 bnad->cfg_flags |= BNAD_CF_PROMISC;
2854 }
2855 } else {
2856 if (bnad->cfg_flags & BNAD_CF_PROMISC) {
2857 new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
2858 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2859 bnad->cfg_flags &= ~BNAD_CF_PROMISC;
2860 }
2861 }
2862
2863 if (netdev->flags & IFF_ALLMULTI) {
2864 if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
2865 new_mask |= BNA_RXMODE_ALLMULTI;
2866 valid_mask |= BNA_RXMODE_ALLMULTI;
2867 bnad->cfg_flags |= BNAD_CF_ALLMULTI;
2868 }
2869 } else {
2870 if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
2871 new_mask &= ~BNA_RXMODE_ALLMULTI;
2872 valid_mask |= BNA_RXMODE_ALLMULTI;
2873 bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
2874 }
2875 }
2876
271e8b79
RM
2877 if (bnad->rx_info[0].rx == NULL)
2878 goto unlock;
2879
8b230ed8
RM
2880 bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
2881
2882 if (!netdev_mc_empty(netdev)) {
2883 u8 *mcaddr_list;
2884 int mc_count = netdev_mc_count(netdev);
2885
2886 /* Index 0 holds the broadcast address */
2887 mcaddr_list =
2888 kzalloc((mc_count + 1) * ETH_ALEN,
2889 GFP_ATOMIC);
2890 if (!mcaddr_list)
ca1cef3a 2891 goto unlock;
8b230ed8
RM
2892
2893 memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
2894
2895 /* Copy rest of the MC addresses */
2896 bnad_netdev_mc_list_get(netdev, mcaddr_list);
2897
2898 bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
2899 mcaddr_list, NULL);
2900
2901 /* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
2902 kfree(mcaddr_list);
2903 }
ca1cef3a 2904unlock:
8b230ed8
RM
2905 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2906}
2907
2908/*
2909 * bna_lock is used to sync writes to netdev->addr
2910 * conf_lock cannot be used since this call may be made
2911 * in a non-blocking context.
2912 */
2913static int
2914bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
2915{
2916 int err;
2917 struct bnad *bnad = netdev_priv(netdev);
2918 struct sockaddr *sa = (struct sockaddr *)mac_addr;
2919 unsigned long flags;
2920
2921 spin_lock_irqsave(&bnad->bna_lock, flags);
2922
2923 err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
2924
2925 if (!err)
2926 memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
2927
2928 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2929
2930 return err;
2931}
2932
2933static int
078086f3 2934bnad_mtu_set(struct bnad *bnad, int mtu)
8b230ed8 2935{
8b230ed8
RM
2936 unsigned long flags;
2937
078086f3
RM
2938 init_completion(&bnad->bnad_completions.mtu_comp);
2939
2940 spin_lock_irqsave(&bnad->bna_lock, flags);
2941 bna_enet_mtu_set(&bnad->bna.enet, mtu, bnad_cb_enet_mtu_set);
2942 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2943
2944 wait_for_completion(&bnad->bnad_completions.mtu_comp);
2945
2946 return bnad->bnad_completions.mtu_comp_status;
2947}
2948
2949static int
2950bnad_change_mtu(struct net_device *netdev, int new_mtu)
2951{
2952 int err, mtu = netdev->mtu;
8b230ed8
RM
2953 struct bnad *bnad = netdev_priv(netdev);
2954
2955 if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
2956 return -EINVAL;
2957
2958 mutex_lock(&bnad->conf_mutex);
2959
2960 netdev->mtu = new_mtu;
2961
078086f3
RM
2962 mtu = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN;
2963 err = bnad_mtu_set(bnad, mtu);
2964 if (err)
2965 err = -EBUSY;
8b230ed8
RM
2966
2967 mutex_unlock(&bnad->conf_mutex);
2968 return err;
2969}
2970
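/*
 * The value handed to bna_enet_mtu_set() above is the full frame length,
 * not the L3 MTU: Ethernet header + one VLAN tag + netdev MTU + FCS.
 * A quick worked example for the default MTU of 1500:
 *
 *	14 (ETH_HLEN) + 4 (VLAN_HLEN) + 1500 + 4 (ETH_FCS_LEN) = 1522 bytes
 *
 * The helper below is an illustrative restatement, not driver API.
 */
static int wire_frame_size(int netdev_mtu)
{
	return 14 /* ETH_HLEN */ + 4 /* VLAN_HLEN */ + netdev_mtu + 4 /* ETH_FCS_LEN */;
}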
8e586137 2971static int
8b230ed8
RM
2972bnad_vlan_rx_add_vid(struct net_device *netdev,
2973 unsigned short vid)
2974{
2975 struct bnad *bnad = netdev_priv(netdev);
2976 unsigned long flags;
2977
2978 if (!bnad->rx_info[0].rx)
8e586137 2979 return 0;
8b230ed8
RM
2980
2981 mutex_lock(&bnad->conf_mutex);
2982
2983 spin_lock_irqsave(&bnad->bna_lock, flags);
2984 bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
f859d7cb 2985 set_bit(vid, bnad->active_vlans);
8b230ed8
RM
2986 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2987
2988 mutex_unlock(&bnad->conf_mutex);
8e586137
JP
2989
2990 return 0;
8b230ed8
RM
2991}
2992
8e586137 2993static int
8b230ed8
RM
2994bnad_vlan_rx_kill_vid(struct net_device *netdev,
2995 unsigned short vid)
2996{
2997 struct bnad *bnad = netdev_priv(netdev);
2998 unsigned long flags;
2999
3000 if (!bnad->rx_info[0].rx)
8e586137 3001 return 0;
8b230ed8
RM
3002
3003 mutex_lock(&bnad->conf_mutex);
3004
3005 spin_lock_irqsave(&bnad->bna_lock, flags);
f859d7cb 3006 clear_bit(vid, bnad->active_vlans);
8b230ed8
RM
3007 bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
3008 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3009
3010 mutex_unlock(&bnad->conf_mutex);
8e586137
JP
3011
3012 return 0;
8b230ed8
RM
3013}
3014
3015#ifdef CONFIG_NET_POLL_CONTROLLER
3016static void
3017bnad_netpoll(struct net_device *netdev)
3018{
3019 struct bnad *bnad = netdev_priv(netdev);
3020 struct bnad_rx_info *rx_info;
3021 struct bnad_rx_ctrl *rx_ctrl;
3022 u32 curr_mask;
3023 int i, j;
3024
3025 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
3026 bna_intx_disable(&bnad->bna, curr_mask);
3027 bnad_isr(bnad->pcidev->irq, netdev);
3028 bna_intx_enable(&bnad->bna, curr_mask);
3029 } else {
19dbff9f
RM
3030 /*
3031 * Tx processing may happen in sending context, so no need
3032 * to explicitly process completions here
3033 */
3034
3035 /* Rx processing */
8b230ed8
RM
3036 for (i = 0; i < bnad->num_rx; i++) {
3037 rx_info = &bnad->rx_info[i];
3038 if (!rx_info->rx)
3039 continue;
3040 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
3041 rx_ctrl = &rx_info->rx_ctrl[j];
271e8b79 3042 if (rx_ctrl->ccb)
8b230ed8
RM
3043 bnad_netif_rx_schedule_poll(bnad,
3044 rx_ctrl->ccb);
8b230ed8
RM
3045 }
3046 }
3047 }
3048}
3049#endif
3050
3051static const struct net_device_ops bnad_netdev_ops = {
3052 .ndo_open = bnad_open,
3053 .ndo_stop = bnad_stop,
3054 .ndo_start_xmit = bnad_start_xmit,
250e061e 3055 .ndo_get_stats64 = bnad_get_stats64,
8b230ed8 3056 .ndo_set_rx_mode = bnad_set_rx_mode,
8b230ed8
RM
3057 .ndo_validate_addr = eth_validate_addr,
3058 .ndo_set_mac_address = bnad_set_mac_address,
3059 .ndo_change_mtu = bnad_change_mtu,
8b230ed8
RM
3060 .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
3061 .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
3062#ifdef CONFIG_NET_POLL_CONTROLLER
3063 .ndo_poll_controller = bnad_netpoll
3064#endif
3065};
3066
3067static void
3068bnad_netdev_init(struct bnad *bnad, bool using_dac)
3069{
3070 struct net_device *netdev = bnad->netdev;
3071
e5ee20e7
MM
3072 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3073 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3074 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX;
8b230ed8 3075
e5ee20e7
MM
3076 netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
3077 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3078 NETIF_F_TSO | NETIF_F_TSO6;
8b230ed8 3079
e5ee20e7
MM
3080 netdev->features |= netdev->hw_features |
3081 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
8b230ed8
RM
3082
3083 if (using_dac)
3084 netdev->features |= NETIF_F_HIGHDMA;
3085
8b230ed8
RM
3086 netdev->mem_start = bnad->mmio_start;
3087 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
3088
3089 netdev->netdev_ops = &bnad_netdev_ops;
3090 bnad_set_ethtool_ops(netdev);
3091}
3092
3093/*
3094 * 1. Initialize the bnad structure
3095 * 2. Setup netdev pointer in pci_dev
d95d1081
JH
3096 * 3. Initialize no. of TxQ & CQs & MSIX vectors
3097 * 4. Initialize work queue.
8b230ed8
RM
3098 */
3099static int
3100bnad_init(struct bnad *bnad,
3101 struct pci_dev *pdev, struct net_device *netdev)
3102{
3103 unsigned long flags;
3104
3105 SET_NETDEV_DEV(netdev, &pdev->dev);
3106 pci_set_drvdata(pdev, netdev);
3107
3108 bnad->netdev = netdev;
3109 bnad->pcidev = pdev;
3110 bnad->mmio_start = pci_resource_start(pdev, 0);
3111 bnad->mmio_len = pci_resource_len(pdev, 0);
3112 bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
3113 if (!bnad->bar0) {
3114 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
3115 pci_set_drvdata(pdev, NULL);
3116 return -ENOMEM;
3117 }
3118 pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
3119 (unsigned long long) bnad->mmio_len);
3120
3121 spin_lock_irqsave(&bnad->bna_lock, flags);
3122 if (!bnad_msix_disable)
3123 bnad->cfg_flags = BNAD_CF_MSIX;
3124
3125 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
3126
3127 bnad_q_num_init(bnad);
3128 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3129
3130 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
3131 (bnad->num_rx * bnad->num_rxp_per_rx) +
3132 BNAD_MAILBOX_MSIX_VECTORS;
8b230ed8
RM
3133
3134 bnad->txq_depth = BNAD_TXQ_DEPTH;
3135 bnad->rxq_depth = BNAD_RXQ_DEPTH;
8b230ed8
RM
3136
3137 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3138 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3139
01b54b14
JH
3140 sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
3141 bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
3142
3143 if (!bnad->work_q)
3144 return -ENOMEM;
3145
8b230ed8
RM
3146 return 0;
3147}
3148
3149/*
3150 * Must be called after bnad_pci_uninit()
3151 * so that iounmap() and pci_set_drvdata(NULL)
3152 * happen only after PCI uninitialization.
3153 */
3154static void
3155bnad_uninit(struct bnad *bnad)
3156{
01b54b14
JH
3157 if (bnad->work_q) {
3158 flush_workqueue(bnad->work_q);
3159 destroy_workqueue(bnad->work_q);
3160 bnad->work_q = NULL;
3161 }
3162
8b230ed8
RM
3163 if (bnad->bar0)
3164 iounmap(bnad->bar0);
3165 pci_set_drvdata(bnad->pcidev, NULL);
3166}
3167
3168/*
3169 * Initialize locks
078086f3 3170 a) Per-ioceth mutex used for serializing configuration
8b230ed8
RM
3171 changes from OS interface
3172 b) spin lock used to protect bna state machine
3173 */
3174static void
3175bnad_lock_init(struct bnad *bnad)
3176{
3177 spin_lock_init(&bnad->bna_lock);
3178 mutex_init(&bnad->conf_mutex);
72a9730b 3179 mutex_init(&bnad_list_mutex);
8b230ed8
RM
3180}
3181
3182static void
3183bnad_lock_uninit(struct bnad *bnad)
3184{
3185 mutex_destroy(&bnad->conf_mutex);
72a9730b 3186 mutex_destroy(&bnad_list_mutex);
8b230ed8
RM
3187}
3188
3189/* PCI Initialization */
3190static int
3191bnad_pci_init(struct bnad *bnad,
3192 struct pci_dev *pdev, bool *using_dac)
3193{
3194 int err;
3195
3196 err = pci_enable_device(pdev);
3197 if (err)
3198 return err;
3199 err = pci_request_regions(pdev, BNAD_NAME);
3200 if (err)
3201 goto disable_device;
5ea74318
IV
3202 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
3203 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
3db1cd5c 3204 *using_dac = true;
8b230ed8 3205 } else {
5ea74318 3206 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
8b230ed8 3207 if (err) {
5ea74318
IV
3208 err = dma_set_coherent_mask(&pdev->dev,
3209 DMA_BIT_MASK(32));
8b230ed8
RM
3210 if (err)
3211 goto release_regions;
3212 }
3db1cd5c 3213 *using_dac = false;
8b230ed8
RM
3214 }
3215 pci_set_master(pdev);
3216 return 0;
3217
3218release_regions:
3219 pci_release_regions(pdev);
3220disable_device:
3221 pci_disable_device(pdev);
3222
3223 return err;
3224}
3225
3226static void
3227bnad_pci_uninit(struct pci_dev *pdev)
3228{
3229 pci_release_regions(pdev);
3230 pci_disable_device(pdev);
3231}
3232
3233static int __devinit
3234bnad_pci_probe(struct pci_dev *pdev,
3235 const struct pci_device_id *pcidev_id)
3236{
3caa1e95 3237 bool using_dac;
0120b99c 3238 int err;
8b230ed8
RM
3239 struct bnad *bnad;
3240 struct bna *bna;
3241 struct net_device *netdev;
3242 struct bfa_pcidev pcidev_info;
3243 unsigned long flags;
3244
3245 pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
3246 pdev, pcidev_id, PCI_FUNC(pdev->devfn));
3247
3248 mutex_lock(&bnad_fwimg_mutex);
3249 if (!cna_get_firmware_buf(pdev)) {
3250 mutex_unlock(&bnad_fwimg_mutex);
3251 pr_warn("Failed to load Firmware Image!\n");
3252 return -ENODEV;
3253 }
3254 mutex_unlock(&bnad_fwimg_mutex);
3255
3256 /*
3257 * Allocates sizeof(struct net_device + struct bnad)
3258 * bnad = netdev->priv
3259 */
3260 netdev = alloc_etherdev(sizeof(struct bnad));
3261 if (!netdev) {
8b230ed8
RM
3262 err = -ENOMEM;
3263 return err;
3264 }
3265 bnad = netdev_priv(netdev);
078086f3 3266 bnad_lock_init(bnad);
72a9730b 3267 bnad_add_to_list(bnad);
078086f3
RM
3268
3269 mutex_lock(&bnad->conf_mutex);
8b230ed8
RM
3270 /*
3271 * PCI initialization
0120b99c 3272 * Output : using_dac = 1 for 64 bit DMA
be7fa326 3273 * = 0 for 32 bit DMA
8b230ed8
RM
3274 */
3275 err = bnad_pci_init(bnad, pdev, &using_dac);
3276 if (err)
44861f44 3277 goto unlock_mutex;
8b230ed8 3278
8b230ed8
RM
3279 /*
3280 * Initialize bnad structure
3281 * Setup relation between pci_dev & netdev
8b230ed8
RM
3282 */
3283 err = bnad_init(bnad, pdev, netdev);
3284 if (err)
3285 goto pci_uninit;
078086f3 3286
8b230ed8
RM
3287 /* Initialize netdev structure, set up ethtool ops */
3288 bnad_netdev_init(bnad, using_dac);
3289
815f41e7
RM
3290 /* Set link to down state */
3291 netif_carrier_off(netdev);
3292
7afc5dbd
KG
3293 	/* Set up the debugfs node for this bnad */
3294 if (bna_debugfs_enable)
3295 bnad_debugfs_init(bnad);
3296
8b230ed8 3297 /* Get resource requirement form bna */
078086f3 3298 spin_lock_irqsave(&bnad->bna_lock, flags);
8b230ed8 3299 bna_res_req(&bnad->res_info[0]);
078086f3 3300 spin_unlock_irqrestore(&bnad->bna_lock, flags);
8b230ed8
RM
3301
3302 /* Allocate resources from bna */
078086f3 3303 err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
8b230ed8 3304 if (err)
078086f3 3305 goto drv_uninit;
8b230ed8
RM
3306
3307 bna = &bnad->bna;
3308
3309 /* Setup pcidev_info for bna_init() */
3310 pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3311 pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3312 pcidev_info.device_id = bnad->pcidev->device;
3313 pcidev_info.pci_bar_kva = bnad->bar0;
3314
8b230ed8
RM
3315 spin_lock_irqsave(&bnad->bna_lock, flags);
3316 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
8b230ed8
RM
3317 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3318
3319 bnad->stats.bna_stats = &bna->stats;
3320
078086f3
RM
3321 bnad_enable_msix(bnad);
3322 err = bnad_mbox_irq_alloc(bnad);
3323 if (err)
3324 goto res_free;
3325
3326
8b230ed8 3327 /* Set up timers */
078086f3 3328 setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
8b230ed8 3329 ((unsigned long)bnad));
078086f3 3330 setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
8b230ed8 3331 ((unsigned long)bnad));
078086f3 3332 setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
1d32f769 3333 ((unsigned long)bnad));
078086f3 3334 setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
8b230ed8
RM
3335 ((unsigned long)bnad));
3336
3337 /* Now start the timer before calling IOC */
078086f3 3338 mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
8b230ed8
RM
3339 jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
3340
3341 /*
3342 * Start the chip
078086f3
RM
3343 	 * If the callback comes back with an error, we bail out.
3344 * This is a catastrophic error.
8b230ed8 3345 */
078086f3
RM
3346 err = bnad_ioceth_enable(bnad);
3347 if (err) {
3348 pr_err("BNA: Initialization failed err=%d\n",
3349 err);
3350 goto probe_success;
3351 }
3352
3353 spin_lock_irqsave(&bnad->bna_lock, flags);
3354 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3355 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
3356 bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
3357 bna_attr(bna)->num_rxp - 1);
3358 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3359 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
3360 err = -EIO;
3361 }
3caa1e95
RM
3362 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3363 if (err)
3364 goto disable_ioceth;
3365
3366 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3
RM
3367 bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
3368 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3369
3370 err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
0caa9aae
RM
3371 if (err) {
3372 err = -EIO;
078086f3 3373 goto disable_ioceth;
0caa9aae 3374 }
078086f3
RM
3375
3376 spin_lock_irqsave(&bnad->bna_lock, flags);
3377 bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
3378 spin_unlock_irqrestore(&bnad->bna_lock, flags);
8b230ed8
RM
3379
3380 /* Get the burnt-in mac */
3381 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3 3382 bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr);
8b230ed8
RM
3383 bnad_set_netdev_perm_addr(bnad);
3384 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3385
0caa9aae
RM
3386 mutex_unlock(&bnad->conf_mutex);
3387
8b230ed8
RM
3388 	/* Finally, register with the net_device layer */
3389 err = register_netdev(netdev);
3390 if (err) {
3391 pr_err("BNA : Registering with netdev failed\n");
078086f3 3392 goto probe_uninit;
8b230ed8 3393 }
078086f3 3394 set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
8b230ed8 3395
0caa9aae
RM
3396 return 0;
3397
078086f3
RM
3398probe_success:
3399 mutex_unlock(&bnad->conf_mutex);
8b230ed8
RM
3400 return 0;
3401
078086f3 3402probe_uninit:
3fc72370 3403 mutex_lock(&bnad->conf_mutex);
078086f3
RM
3404 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3405disable_ioceth:
3406 bnad_ioceth_disable(bnad);
3407 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3408 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3409 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
8b230ed8
RM
3410 spin_lock_irqsave(&bnad->bna_lock, flags);
3411 bna_uninit(bna);
3412 spin_unlock_irqrestore(&bnad->bna_lock, flags);
078086f3 3413 bnad_mbox_irq_free(bnad);
8b230ed8 3414 bnad_disable_msix(bnad);
078086f3
RM
3415res_free:
3416 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3417drv_uninit:
7afc5dbd
KG
3418 /* Remove the debugfs node for this bnad */
3419 kfree(bnad->regdata);
3420 bnad_debugfs_uninit(bnad);
078086f3 3421 bnad_uninit(bnad);
8b230ed8
RM
3422pci_uninit:
3423 bnad_pci_uninit(pdev);
44861f44 3424unlock_mutex:
078086f3 3425 mutex_unlock(&bnad->conf_mutex);
72a9730b 3426 bnad_remove_from_list(bnad);
8b230ed8 3427 bnad_lock_uninit(bnad);
8b230ed8
RM
3428 free_netdev(netdev);
3429 return err;
3430}
3431
3432static void __devexit
3433bnad_pci_remove(struct pci_dev *pdev)
3434{
3435 struct net_device *netdev = pci_get_drvdata(pdev);
3436 struct bnad *bnad;
3437 struct bna *bna;
3438 unsigned long flags;
3439
3440 if (!netdev)
3441 return;
3442
3443 pr_info("%s bnad_pci_remove\n", netdev->name);
3444 bnad = netdev_priv(netdev);
3445 bna = &bnad->bna;
3446
078086f3
RM
3447 if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
3448 unregister_netdev(netdev);
8b230ed8
RM
3449
3450 mutex_lock(&bnad->conf_mutex);
078086f3
RM
3451 bnad_ioceth_disable(bnad);
3452 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3453 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3454 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
8b230ed8
RM
3455 spin_lock_irqsave(&bnad->bna_lock, flags);
3456 bna_uninit(bna);
3457 spin_unlock_irqrestore(&bnad->bna_lock, flags);
8b230ed8 3458
078086f3
RM
3459 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3460 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3461 bnad_mbox_irq_free(bnad);
8b230ed8
RM
3462 bnad_disable_msix(bnad);
3463 bnad_pci_uninit(pdev);
078086f3 3464 mutex_unlock(&bnad->conf_mutex);
72a9730b 3465 bnad_remove_from_list(bnad);
8b230ed8 3466 bnad_lock_uninit(bnad);
7afc5dbd
KG
3467 /* Remove the debugfs node for this bnad */
3468 kfree(bnad->regdata);
3469 bnad_debugfs_uninit(bnad);
8b230ed8
RM
3470 bnad_uninit(bnad);
3471 free_netdev(netdev);
3472}
3473
0120b99c 3474static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = {
8b230ed8
RM
3475 {
3476 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3477 PCI_DEVICE_ID_BROCADE_CT),
3478 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3479 .class_mask = 0xffff00
586b2816
RM
3480 },
3481 {
3482 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3483 BFA_PCI_DEVICE_ID_CT2),
3484 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3485 .class_mask = 0xffff00
3486 },
3487 {0, },
8b230ed8
RM
3488};
3489
3490MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3491
3492static struct pci_driver bnad_pci_driver = {
3493 .name = BNAD_NAME,
3494 .id_table = bnad_pci_id_table,
3495 .probe = bnad_pci_probe,
3496 .remove = __devexit_p(bnad_pci_remove),
3497};
3498
3499static int __init
3500bnad_module_init(void)
3501{
3502 int err;
3503
5aad0011
RM
3504 pr_info("Brocade 10G Ethernet driver - version: %s\n",
3505 BNAD_VERSION);
8b230ed8 3506
8a891429 3507 bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
8b230ed8
RM
3508
3509 err = pci_register_driver(&bnad_pci_driver);
3510 if (err < 0) {
3511 pr_err("bna : PCI registration failed in module init "
3512 "(%d)\n", err);
3513 return err;
3514 }
3515
3516 return 0;
3517}
3518
3519static void __exit
3520bnad_module_exit(void)
3521{
3522 pci_unregister_driver(&bnad_pci_driver);
3523
3524 if (bfi_fw)
3525 release_firmware(bfi_fw);
3526}
3527
3528module_init(bnad_module_init);
3529module_exit(bnad_module_exit);
3530
3531MODULE_AUTHOR("Brocade");
3532MODULE_LICENSE("GPL");
3533MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
3534MODULE_VERSION(BNAD_VERSION);
3535MODULE_FIRMWARE(CNA_FW_FILE_CT);
1bf9fd70 3536MODULE_FIRMWARE(CNA_FW_FILE_CT2);