drivers/net/ethernet/brocade/bna/bnad.c
8b230ed8
RM
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
f859d7cb 18#include <linux/bitops.h>
8b230ed8
RM
19#include <linux/netdevice.h>
20#include <linux/skbuff.h>
21#include <linux/etherdevice.h>
22#include <linux/in.h>
23#include <linux/ethtool.h>
24#include <linux/if_vlan.h>
25#include <linux/if_ether.h>
26#include <linux/ip.h>
70c71606 27#include <linux/prefetch.h>
9d9779e7 28#include <linux/module.h>
8b230ed8
RM
29
30#include "bnad.h"
31#include "bna.h"
32#include "cna.h"
33
b7ee31c5 34static DEFINE_MUTEX(bnad_fwimg_mutex);
8b230ed8
RM
35
36/*
37 * Module params
38 */
39static uint bnad_msix_disable;
40module_param(bnad_msix_disable, uint, 0444);
41MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
42
43static uint bnad_ioc_auto_recover = 1;
44module_param(bnad_ioc_auto_recover, uint, 0444);
45MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
46
7afc5dbd
KG
47static uint bna_debugfs_enable = 1;
48module_param(bna_debugfs_enable, uint, S_IRUGO | S_IWUSR);
49MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
50 " Range[false:0|true:1]");
51
8b230ed8
RM
52/*
53 * Global variables
54 */
482da0fa 55static u32 bnad_rxqs_per_cq = 2;
e1e0918f 56static u32 bna_id;
57static struct mutex bnad_list_mutex;
58static LIST_HEAD(bnad_list);
b7ee31c5 59static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
8b230ed8
RM
60
61/*
62 * Local MACROS
63 */
8b230ed8
RM
64#define BNAD_GET_MBOX_IRQ(_bnad) \
65 (((_bnad)->cfg_flags & BNAD_CF_MSIX) ? \
8811e267 66 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
8b230ed8
RM
67 ((_bnad)->pcidev->irq))
68
5216562a 69#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size) \
8b230ed8
RM
70do { \
71 (_res_info)->res_type = BNA_RES_T_MEM; \
72 (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA; \
73 (_res_info)->res_u.mem_info.num = (_num); \
5216562a 74 (_res_info)->res_u.mem_info.len = (_size); \
8b230ed8
RM
75} while (0)
76
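/*
 * Note on the macro above: it records a kernel-virtual (KVA) memory
 * requirement in a bna_res_info slot.  bnad_setup_tx() further down uses it
 * to request one unmap array per TxQ, each sized for txq_depth entries,
 * i.e. (illustrative expansion only, field names as in the macro body):
 *
 *	res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_type = BNA_RES_T_MEM;
 *	res_info[...].res_u.mem_info.mem_type      = BNA_MEM_T_KVA;
 *	res_info[...].res_u.mem_info.num           = bnad->num_txq_per_tx;
 *	res_info[...].res_u.mem_info.len           =
 *		sizeof(struct bnad_tx_unmap) * bnad->txq_depth;
 */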
72a9730b
KG
77static void
78bnad_add_to_list(struct bnad *bnad)
79{
80 mutex_lock(&bnad_list_mutex);
81 list_add_tail(&bnad->list_entry, &bnad_list);
82 bnad->id = bna_id++;
83 mutex_unlock(&bnad_list_mutex);
84}
85
86static void
87bnad_remove_from_list(struct bnad *bnad)
88{
89 mutex_lock(&bnad_list_mutex);
90 list_del(&bnad->list_entry);
91 mutex_unlock(&bnad_list_mutex);
92}
93
8b230ed8
RM
94/*
95 * Reinitialize completions in CQ, once Rx is taken down
96 */
97static void
b3cc6e88 98bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
8b230ed8 99{
5216562a 100 struct bna_cq_entry *cmpl;
8b230ed8
RM
101 int i;
102
8b230ed8 103 for (i = 0; i < ccb->q_depth; i++) {
5216562a 104 cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i];
8b230ed8 105 cmpl->valid = 0;
8b230ed8
RM
106 }
107}
108
5216562a
RM
109/* Tx Datapath functions */
110
111
112/* Caller should ensure that the entry at unmap_q[index] is valid */
271e8b79 113static u32
5216562a
RM
114bnad_tx_buff_unmap(struct bnad *bnad,
115 struct bnad_tx_unmap *unmap_q,
116 u32 q_depth, u32 index)
271e8b79 117{
5216562a
RM
118 struct bnad_tx_unmap *unmap;
119 struct sk_buff *skb;
120 int vector, nvecs;
121
122 unmap = &unmap_q[index];
123 nvecs = unmap->nvecs;
124
125 skb = unmap->skb;
126 unmap->skb = NULL;
127 unmap->nvecs = 0;
128 dma_unmap_single(&bnad->pcidev->dev,
129 dma_unmap_addr(&unmap->vectors[0], dma_addr),
130 skb_headlen(skb), DMA_TO_DEVICE);
131 dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0);
132 nvecs--;
133
134 vector = 0;
135 while (nvecs) {
136 vector++;
137 if (vector == BFI_TX_MAX_VECTORS_PER_WI) {
138 vector = 0;
139 BNA_QE_INDX_INC(index, q_depth);
140 unmap = &unmap_q[index];
141 }
271e8b79 142
5216562a
RM
143 dma_unmap_page(&bnad->pcidev->dev,
144 dma_unmap_addr(&unmap->vectors[vector], dma_addr),
145 skb_shinfo(skb)->frags[nvecs].size, DMA_TO_DEVICE);
146 dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
147 nvecs--;
271e8b79
RM
148 }
149
5216562a
RM
150 BNA_QE_INDX_INC(index, q_depth);
151
271e8b79
RM
152 return index;
153}
154
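/*
 * bnad_tx_buff_unmap() above advances through the unmap array one work-item
 * vector at a time and wraps the index with BNA_QE_INDX_INC().  A toy model
 * of that wrap, assuming (as is usual for these rings) a power-of-two queue
 * depth so the increment reduces to a mask; the real macro lives in bna.h:
 */
static inline u32 toy_qe_indx_inc(u32 idx, u32 q_depth)
{
	/* q_depth assumed to be a power of two */
	return (idx + 1) & (q_depth - 1);
}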
8b230ed8
RM
155/*
156 * Frees all pending Tx Bufs
157 * At this point no activity is expected on the Q,
158 * so DMA unmap & freeing is fine.
159 */
160static void
5216562a 161bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
8b230ed8 162{
5216562a
RM
163 struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
164 struct sk_buff *skb;
165 int i;
8b230ed8 166
5216562a
RM
167 for (i = 0; i < tcb->q_depth; i++) {
168 skb = unmap_q[i].skb;
938fa488 169 if (!skb)
8b230ed8 170 continue;
5216562a 171 bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);
938fa488 172
8b230ed8
RM
173 dev_kfree_skb_any(skb);
174 }
175}
176
8b230ed8 177/*
b3cc6e88 178 * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
8b230ed8
RM
179 * Can be called in a) Interrupt context
180 * b) Sending context
8b230ed8
RM
181 */
182static u32
5216562a 183bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
8b230ed8 184{
5216562a
RM
185 u32 sent_packets = 0, sent_bytes = 0;
186 u32 wis, unmap_wis, hw_cons, cons, q_depth;
187 struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
188 struct bnad_tx_unmap *unmap;
189 struct sk_buff *skb;
8b230ed8 190
d95d1081 191 /* Just return if TX is stopped */
be7fa326 192 if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
8b230ed8
RM
193 return 0;
194
5216562a
RM
195 hw_cons = *(tcb->hw_consumer_index);
196 cons = tcb->consumer_index;
197 q_depth = tcb->q_depth;
8b230ed8 198
5216562a 199 wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth);
8b230ed8
RM
200 BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
201
8b230ed8 202 while (wis) {
5216562a
RM
203 unmap = &unmap_q[cons];
204
205 skb = unmap->skb;
8b230ed8 206
8b230ed8
RM
207 sent_packets++;
208 sent_bytes += skb->len;
8b230ed8 209
5216562a
RM
210 unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs);
211 wis -= unmap_wis;
8b230ed8 212
5216562a 213 cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons);
8b230ed8
RM
214 dev_kfree_skb_any(skb);
215 }
216
217 /* Update consumer pointers. */
5216562a 218 tcb->consumer_index = hw_cons;
8b230ed8
RM
219
220 tcb->txq->tx_packets += sent_packets;
221 tcb->txq->tx_bytes += sent_bytes;
222
223 return sent_packets;
224}
225
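/*
 * bnad_txcmpl_process() above derives the number of retired work items from
 * the distance between the hardware consumer index and the driver's cached
 * consumer index.  A toy model of that distance on a wrapping ring, under the
 * same power-of-two-depth assumption (the real BNA_Q_INDEX_CHANGE() is in
 * bna.h):
 */
static inline u32 toy_q_index_change(u32 cons, u32 hw_cons, u32 q_depth)
{
	/* number of entries the hardware has consumed past 'cons' */
	return (hw_cons - cons) & (q_depth - 1);
}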
8b230ed8 226static u32
b3cc6e88 227bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
8b230ed8
RM
228{
229 struct net_device *netdev = bnad->netdev;
be7fa326 230 u32 sent = 0;
8b230ed8
RM
231
232 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
233 return 0;
234
b3cc6e88 235 sent = bnad_txcmpl_process(bnad, tcb);
8b230ed8
RM
236 if (sent) {
237 if (netif_queue_stopped(netdev) &&
238 netif_carrier_ok(netdev) &&
239 BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
240 BNAD_NETIF_WAKE_THRESHOLD) {
be7fa326
RM
241 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
242 netif_wake_queue(netdev);
243 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
244 }
8b230ed8 245 }
be7fa326
RM
246 }
247
248 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
8b230ed8 249 bna_ib_ack(tcb->i_dbell, sent);
8b230ed8
RM
250
251 smp_mb__before_clear_bit();
252 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
253
254 return sent;
255}
256
257/* MSIX Tx Completion Handler */
258static irqreturn_t
259bnad_msix_tx(int irq, void *data)
260{
261 struct bna_tcb *tcb = (struct bna_tcb *)data;
262 struct bnad *bnad = tcb->bnad;
263
b3cc6e88 264 bnad_tx_complete(bnad, tcb);
8b230ed8
RM
265
266 return IRQ_HANDLED;
267}
268
30f9fc94
RM
269static inline void
270bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb)
271{
272 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
273
274 unmap_q->reuse_pi = -1;
275 unmap_q->alloc_order = -1;
276 unmap_q->map_size = 0;
277 unmap_q->type = BNAD_RXBUF_NONE;
278}
279
280/* Default is page-based allocation. Multi-buffer support - TBD */
281static int
282bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
283{
284 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
e29aa339 285 int order;
30f9fc94
RM
286
287 bnad_rxq_alloc_uninit(bnad, rcb);
288
e29aa339
RM
289 order = get_order(rcb->rxq->buffer_size);
290
291 unmap_q->type = BNAD_RXBUF_PAGE;
30f9fc94
RM
292
293 if (bna_is_small_rxq(rcb->id)) {
294 unmap_q->alloc_order = 0;
295 unmap_q->map_size = rcb->rxq->buffer_size;
296 } else {
e29aa339
RM
297 if (rcb->rxq->multi_buffer) {
298 unmap_q->alloc_order = 0;
299 unmap_q->map_size = rcb->rxq->buffer_size;
300 unmap_q->type = BNAD_RXBUF_MULTI_BUFF;
301 } else {
302 unmap_q->alloc_order = order;
303 unmap_q->map_size =
304 (rcb->rxq->buffer_size > 2048) ?
305 PAGE_SIZE << order : 2048;
306 }
30f9fc94
RM
307 }
308
309 BUG_ON(((PAGE_SIZE << order) % unmap_q->map_size));
310
30f9fc94
RM
311 return 0;
312}
313
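/*
 * Worked numbers for the sizing above, assuming 4 KB pages (illustrative):
 *
 *	buffer_size 2048 -> order 0, map_size 2048   (two buffers per 4 KB page)
 *	buffer_size 9000 -> order 2, map_size 16384  (one buffer per 16 KB block)
 *
 * In both cases PAGE_SIZE << order is an exact multiple of map_size, so the
 * BUG_ON() above holds.
 */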
314static inline void
315bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap)
316{
317 if (!unmap->page)
318 return;
319
320 dma_unmap_page(&bnad->pcidev->dev,
321 dma_unmap_addr(&unmap->vector, dma_addr),
322 unmap->vector.len, DMA_FROM_DEVICE);
323 put_page(unmap->page);
324 unmap->page = NULL;
325 dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
326 unmap->vector.len = 0;
327}
328
329static inline void
330bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap)
331{
332 if (!unmap->skb)
333 return;
334
335 dma_unmap_single(&bnad->pcidev->dev,
336 dma_unmap_addr(&unmap->vector, dma_addr),
337 unmap->vector.len, DMA_FROM_DEVICE);
338 dev_kfree_skb_any(unmap->skb);
339 unmap->skb = NULL;
340 dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
341 unmap->vector.len = 0;
342}
343
8b230ed8 344static void
b3cc6e88 345bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
8b230ed8 346{
30f9fc94 347 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
5216562a
RM
348 int i;
349
350 for (i = 0; i < rcb->q_depth; i++) {
30f9fc94 351 struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];
8b230ed8 352
e29aa339 353 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
30f9fc94 354 bnad_rxq_cleanup_skb(bnad, unmap);
e29aa339
RM
355 else
356 bnad_rxq_cleanup_page(bnad, unmap);
30f9fc94
RM
357 }
358 bnad_rxq_alloc_uninit(bnad, rcb);
359}
5216562a 360
30f9fc94
RM
361static u32
362bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
363{
364 u32 alloced, prod, q_depth;
365 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
366 struct bnad_rx_unmap *unmap, *prev;
367 struct bna_rxq_entry *rxent;
368 struct page *page;
369 u32 page_offset, alloc_size;
370 dma_addr_t dma_addr;
371
372 prod = rcb->producer_index;
373 q_depth = rcb->q_depth;
374
375 alloc_size = PAGE_SIZE << unmap_q->alloc_order;
376 alloced = 0;
377
378 while (nalloc--) {
379 unmap = &unmap_q->unmap[prod];
380
381 if (unmap_q->reuse_pi < 0) {
382 page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
383 unmap_q->alloc_order);
384 page_offset = 0;
385 } else {
386 prev = &unmap_q->unmap[unmap_q->reuse_pi];
387 page = prev->page;
388 page_offset = prev->page_offset + unmap_q->map_size;
389 get_page(page);
390 }
391
392 if (unlikely(!page)) {
393 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
394 rcb->rxq->rxbuf_alloc_failed++;
395 goto finishing;
396 }
397
398 dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
399 unmap_q->map_size, DMA_FROM_DEVICE);
400
401 unmap->page = page;
402 unmap->page_offset = page_offset;
403 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
404 unmap->vector.len = unmap_q->map_size;
405 page_offset += unmap_q->map_size;
406
407 if (page_offset < alloc_size)
408 unmap_q->reuse_pi = prod;
409 else
410 unmap_q->reuse_pi = -1;
411
412 rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
413 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
414 BNA_QE_INDX_INC(prod, q_depth);
415 alloced++;
416 }
417
418finishing:
419 if (likely(alloced)) {
420 rcb->producer_index = prod;
421 smp_mb();
422 if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
423 bna_rxq_prod_indx_doorbell(rcb);
8b230ed8 424 }
30f9fc94
RM
425
426 return alloced;
8b230ed8
RM
427}
428
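/*
 * bnad_rxq_refill_page() above carves each higher-order allocation into
 * map_size slices: when a slice is handed out and room remains
 * (page_offset < alloc_size), the slot index is remembered in reuse_pi and
 * the next refill takes the following slice of the same page via get_page().
 * Illustrative slicing with alloc_order = 1 and map_size = 2048:
 *
 *	slot N   -> page P, offset 0     (reuse_pi = N)
 *	slot N+1 -> page P, offset 2048  (reuse_pi = N+1)
 *	slot N+2 -> page P, offset 4096  (reuse_pi = N+2)
 *	slot N+3 -> page P, offset 6144  (reuse_pi = -1, 8 KB block exhausted)
 */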
30f9fc94
RM
429static u32
430bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
8b230ed8 431{
30f9fc94
RM
432 u32 alloced, prod, q_depth, buff_sz;
433 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
5216562a 434 struct bnad_rx_unmap *unmap;
8b230ed8
RM
435 struct bna_rxq_entry *rxent;
436 struct sk_buff *skb;
437 dma_addr_t dma_addr;
438
5216562a 439 buff_sz = rcb->rxq->buffer_size;
5216562a
RM
440 prod = rcb->producer_index;
441 q_depth = rcb->q_depth;
8b230ed8 442
30f9fc94
RM
443 alloced = 0;
444 while (nalloc--) {
445 unmap = &unmap_q->unmap[prod];
446
447 skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz);
448
8b230ed8
RM
449 if (unlikely(!skb)) {
450 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
3caa1e95 451 rcb->rxq->rxbuf_alloc_failed++;
8b230ed8
RM
452 goto finishing;
453 }
5ea74318 454 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
5216562a 455 buff_sz, DMA_FROM_DEVICE);
8b230ed8 456
5216562a
RM
457 unmap->skb = skb;
458 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
459 unmap->vector.len = buff_sz;
30f9fc94
RM
460
461 rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
462 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
5216562a 463 BNA_QE_INDX_INC(prod, q_depth);
8b230ed8
RM
464 alloced++;
465 }
466
467finishing:
468 if (likely(alloced)) {
5216562a 469 rcb->producer_index = prod;
8b230ed8 470 smp_mb();
5bcf6ac0 471 if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
be7fa326 472 bna_rxq_prod_indx_doorbell(rcb);
8b230ed8 473 }
30f9fc94
RM
474
475 return alloced;
476}
477
478static inline void
479bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
480{
481 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
482 u32 to_alloc;
483
484 to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth);
485 if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
486 return;
487
e29aa339 488 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
30f9fc94 489 bnad_rxq_refill_skb(bnad, rcb, to_alloc);
e29aa339
RM
490 else
491 bnad_rxq_refill_page(bnad, rcb, to_alloc);
8b230ed8
RM
492}
493
5e46631f
RM
494#define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
495 BNA_CQ_EF_IPV6 | \
496 BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \
497 BNA_CQ_EF_L4_CKSUM_OK)
498
499#define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
500 BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
501#define flags_tcp6 (BNA_CQ_EF_IPV6 | \
502 BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
503#define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
504 BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
505#define flags_udp6 (BNA_CQ_EF_IPV6 | \
506 BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
507
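/*
 * These masks let the receive completion path validate L3 and L4 checksum
 * status with a single comparison: the completion flags are reduced to the
 * protocol/checksum subset and matched against the four all-good patterns.
 * A condensed sketch of the test bnad_cq_process() performs further down:
 */
static inline bool toy_rx_csum_ok(u32 flags)
{
	u32 masked = flags & flags_cksum_prot_mask;

	return masked == flags_tcp4 || masked == flags_udp4 ||
	       masked == flags_tcp6 || masked == flags_udp6;
}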
e29aa339
RM
508static void
509bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
510 u32 sop_ci, u32 nvecs)
30f9fc94 511{
e29aa339
RM
512 struct bnad_rx_unmap_q *unmap_q;
513 struct bnad_rx_unmap *unmap;
514 u32 ci, vec;
30f9fc94 515
e29aa339
RM
516 unmap_q = rcb->unmap_q;
517 for (vec = 0, ci = sop_ci; vec < nvecs; vec++) {
518 unmap = &unmap_q->unmap[ci];
519 BNA_QE_INDX_INC(ci, rcb->q_depth);
520
521 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
522 bnad_rxq_cleanup_skb(bnad, unmap);
523 else
524 bnad_rxq_cleanup_page(bnad, unmap);
525 }
526}
527
528static void
529bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb,
530 u32 sop_ci, u32 nvecs, u32 last_fraglen)
531{
532 struct bnad *bnad;
533 u32 ci, vec, len, totlen = 0;
534 struct bnad_rx_unmap_q *unmap_q;
535 struct bnad_rx_unmap *unmap;
536
537 unmap_q = rcb->unmap_q;
538 bnad = rcb->bnad;
66f9513a
RM
539
540 /* prefetch header */
541 prefetch(page_address(unmap_q->unmap[sop_ci].page) +
542 unmap_q->unmap[sop_ci].page_offset);
543
e29aa339
RM
544 for (vec = 1, ci = sop_ci; vec <= nvecs; vec++) {
545 unmap = &unmap_q->unmap[ci];
546 BNA_QE_INDX_INC(ci, rcb->q_depth);
30f9fc94
RM
547
548 dma_unmap_page(&bnad->pcidev->dev,
549 dma_unmap_addr(&unmap->vector, dma_addr),
550 unmap->vector.len, DMA_FROM_DEVICE);
e29aa339
RM
551
552 len = (vec == nvecs) ?
553 last_fraglen : unmap->vector.len;
554 totlen += len;
555
30f9fc94 556 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
e29aa339 557 unmap->page, unmap->page_offset, len);
30f9fc94
RM
558
559 unmap->page = NULL;
560 unmap->vector.len = 0;
30f9fc94
RM
561 }
562
e29aa339
RM
563 skb->len += totlen;
564 skb->data_len += totlen;
565 skb->truesize += totlen;
566}
567
568static inline void
569bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
570 struct bnad_rx_unmap *unmap, u32 len)
571{
572 prefetch(skb->data);
30f9fc94
RM
573
574 dma_unmap_single(&bnad->pcidev->dev,
575 dma_unmap_addr(&unmap->vector, dma_addr),
576 unmap->vector.len, DMA_FROM_DEVICE);
577
e29aa339 578 skb_put(skb, len);
30f9fc94
RM
579 skb->protocol = eth_type_trans(skb, bnad->netdev);
580
581 unmap->skb = NULL;
582 unmap->vector.len = 0;
30f9fc94
RM
583}
584
8b230ed8 585static u32
b3cc6e88 586bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
8b230ed8 587{
e29aa339 588 struct bna_cq_entry *cq, *cmpl, *next_cmpl;
8b230ed8 589 struct bna_rcb *rcb = NULL;
30f9fc94 590 struct bnad_rx_unmap_q *unmap_q;
e29aa339
RM
591 struct bnad_rx_unmap *unmap = NULL;
592 struct sk_buff *skb = NULL;
8b230ed8 593 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
30f9fc94 594 struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
e29aa339
RM
595 u32 packets = 0, len = 0, totlen = 0;
596 u32 pi, vec, sop_ci = 0, nvecs = 0;
597 u32 flags, masked_flags;
078086f3 598
8b230ed8 599 prefetch(bnad->netdev);
5216562a
RM
600
601 cq = ccb->sw_q;
602 cmpl = &cq[ccb->producer_index];
603
604 while (cmpl->valid && (packets < budget)) {
8b230ed8
RM
605 BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
606
078086f3 607 if (bna_is_small_rxq(cmpl->rxq_id))
8b230ed8 608 rcb = ccb->rcb[1];
078086f3
RM
609 else
610 rcb = ccb->rcb[0];
8b230ed8
RM
611
612 unmap_q = rcb->unmap_q;
613
e29aa339
RM
614 /* start of packet ci */
615 sop_ci = rcb->consumer_index;
616
617 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) {
618 unmap = &unmap_q->unmap[sop_ci];
619 skb = unmap->skb;
620 } else {
621 skb = napi_get_frags(&rx_ctrl->napi);
622 if (unlikely(!skb))
623 break;
624 }
625 prefetch(skb);
626
627 flags = ntohl(cmpl->flags);
628 len = ntohs(cmpl->length);
629 totlen = len;
630 nvecs = 1;
631
632 /* Check all the completions for this frame.
633 * busy-wait doesn't help much, break here.
634 */
635 if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) &&
636 (flags & BNA_CQ_EF_EOP) == 0) {
637 pi = ccb->producer_index;
638 do {
639 BNA_QE_INDX_INC(pi, ccb->q_depth);
640 next_cmpl = &cq[pi];
641
642 if (!next_cmpl->valid)
643 break;
5216562a 644
e29aa339
RM
645 len = ntohs(next_cmpl->length);
646 flags = ntohl(next_cmpl->flags);
647
648 nvecs++;
649 totlen += len;
650 } while ((flags & BNA_CQ_EF_EOP) == 0);
651
652 if (!next_cmpl->valid)
653 break;
654 }
655
656 /* TODO: BNA_CQ_EF_LOCAL ? */
657 if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
658 BNA_CQ_EF_FCS_ERROR |
659 BNA_CQ_EF_TOO_LONG))) {
660 bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);
8b230ed8 661 rcb->rxq->rx_packets_with_error++;
e29aa339 662
8b230ed8
RM
663 goto next;
664 }
665
e29aa339
RM
666 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
667 bnad_cq_setup_skb(bnad, skb, unmap, len);
668 else
669 bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len);
30f9fc94 670
e29aa339
RM
671 packets++;
672 rcb->rxq->rx_packets++;
673 rcb->rxq->rx_bytes += totlen;
674 ccb->bytes_per_intr += totlen;
5e46631f
RM
675
676 masked_flags = flags & flags_cksum_prot_mask;
677
8b230ed8 678 if (likely
e5ee20e7 679 ((bnad->netdev->features & NETIF_F_RXCSUM) &&
5e46631f
RM
680 ((masked_flags == flags_tcp4) ||
681 (masked_flags == flags_udp4) ||
682 (masked_flags == flags_tcp6) ||
683 (masked_flags == flags_udp6))))
8b230ed8
RM
684 skb->ip_summed = CHECKSUM_UNNECESSARY;
685 else
bc8acf2c 686 skb_checksum_none_assert(skb);
8b230ed8 687
f859d7cb 688 if (flags & BNA_CQ_EF_VLAN)
86a9bad3 689 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));
f859d7cb 690
e29aa339 691 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
f859d7cb 692 netif_receive_skb(skb);
e29aa339
RM
693 else
694 napi_gro_frags(&rx_ctrl->napi);
8b230ed8
RM
695
696next:
e29aa339
RM
697 BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth);
698 for (vec = 0; vec < nvecs; vec++) {
699 cmpl = &cq[ccb->producer_index];
700 cmpl->valid = 0;
701 BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
702 }
30f9fc94 703 cmpl = &cq[ccb->producer_index];
8b230ed8
RM
704 }
705
30f9fc94 706 napi_gro_flush(&rx_ctrl->napi, false);
2be67144 707 if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
271e8b79
RM
708 bna_ib_ack_disable_irq(ccb->i_dbell, packets);
709
5216562a 710 bnad_rxq_post(bnad, ccb->rcb[0]);
2be67144 711 if (ccb->rcb[1])
5216562a 712 bnad_rxq_post(bnad, ccb->rcb[1]);
078086f3 713
8b230ed8
RM
714 return packets;
715}
716
8b230ed8
RM
717static void
718bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
719{
720 struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
be7fa326
RM
721 struct napi_struct *napi = &rx_ctrl->napi;
722
723 if (likely(napi_schedule_prep(napi))) {
be7fa326 724 __napi_schedule(napi);
271e8b79 725 rx_ctrl->rx_schedule++;
8b230ed8 726 }
8b230ed8
RM
727}
728
729/* MSIX Rx Path Handler */
730static irqreturn_t
731bnad_msix_rx(int irq, void *data)
732{
733 struct bna_ccb *ccb = (struct bna_ccb *)data;
8b230ed8 734
271e8b79
RM
735 if (ccb) {
736 ((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++;
2be67144 737 bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
271e8b79 738 }
8b230ed8
RM
739
740 return IRQ_HANDLED;
741}
742
743/* Interrupt handlers */
744
745/* Mbox Interrupt Handlers */
746static irqreturn_t
747bnad_msix_mbox_handler(int irq, void *data)
748{
749 u32 intr_status;
e2fa6f2e 750 unsigned long flags;
be7fa326 751 struct bnad *bnad = (struct bnad *)data;
8b230ed8 752
8b230ed8 753 spin_lock_irqsave(&bnad->bna_lock, flags);
dfee325a
RM
754 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
755 spin_unlock_irqrestore(&bnad->bna_lock, flags);
756 return IRQ_HANDLED;
757 }
8b230ed8
RM
758
759 bna_intr_status_get(&bnad->bna, intr_status);
760
078086f3 761 if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
8b230ed8
RM
762 bna_mbox_handler(&bnad->bna, intr_status);
763
764 spin_unlock_irqrestore(&bnad->bna_lock, flags);
765
8b230ed8
RM
766 return IRQ_HANDLED;
767}
768
769static irqreturn_t
770bnad_isr(int irq, void *data)
771{
772 int i, j;
773 u32 intr_status;
774 unsigned long flags;
be7fa326 775 struct bnad *bnad = (struct bnad *)data;
8b230ed8
RM
776 struct bnad_rx_info *rx_info;
777 struct bnad_rx_ctrl *rx_ctrl;
078086f3 778 struct bna_tcb *tcb = NULL;
8b230ed8 779
dfee325a
RM
780 spin_lock_irqsave(&bnad->bna_lock, flags);
781 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
782 spin_unlock_irqrestore(&bnad->bna_lock, flags);
e2fa6f2e 783 return IRQ_NONE;
dfee325a 784 }
8b230ed8
RM
785
786 bna_intr_status_get(&bnad->bna, intr_status);
e2fa6f2e 787
dfee325a
RM
788 if (unlikely(!intr_status)) {
789 spin_unlock_irqrestore(&bnad->bna_lock, flags);
8b230ed8 790 return IRQ_NONE;
dfee325a 791 }
8b230ed8 792
078086f3 793 if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
8b230ed8 794 bna_mbox_handler(&bnad->bna, intr_status);
be7fa326 795
8b230ed8
RM
796 spin_unlock_irqrestore(&bnad->bna_lock, flags);
797
be7fa326
RM
798 if (!BNA_IS_INTX_DATA_INTR(intr_status))
799 return IRQ_HANDLED;
800
8b230ed8 801 /* Process data interrupts */
be7fa326
RM
802 /* Tx processing */
803 for (i = 0; i < bnad->num_tx; i++) {
078086f3
RM
804 for (j = 0; j < bnad->num_txq_per_tx; j++) {
805 tcb = bnad->tx_info[i].tcb[j];
806 if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
b3cc6e88 807 bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
078086f3 808 }
be7fa326
RM
809 }
810 /* Rx processing */
8b230ed8
RM
811 for (i = 0; i < bnad->num_rx; i++) {
812 rx_info = &bnad->rx_info[i];
813 if (!rx_info->rx)
814 continue;
815 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
816 rx_ctrl = &rx_info->rx_ctrl[j];
817 if (rx_ctrl->ccb)
818 bnad_netif_rx_schedule_poll(bnad,
819 rx_ctrl->ccb);
820 }
821 }
8b230ed8
RM
822 return IRQ_HANDLED;
823}
824
825/*
826 * Called in interrupt / callback context
827 * with bna_lock held, so cfg_flags access is OK
828 */
829static void
830bnad_enable_mbox_irq(struct bnad *bnad)
831{
be7fa326 832 clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
e2fa6f2e 833
8b230ed8
RM
834 BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
835}
836
837/*
838 * Called with bnad->bna_lock held because of
839 * bnad->cfg_flags access.
840 */
b7ee31c5 841static void
8b230ed8
RM
842bnad_disable_mbox_irq(struct bnad *bnad)
843{
be7fa326 844 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
8b230ed8 845
be7fa326
RM
846 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
847}
8b230ed8 848
be7fa326
RM
849static void
850bnad_set_netdev_perm_addr(struct bnad *bnad)
851{
852 struct net_device *netdev = bnad->netdev;
e2fa6f2e 853
be7fa326
RM
854 memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
855 if (is_zero_ether_addr(netdev->dev_addr))
856 memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
8b230ed8
RM
857}
858
859/* Control Path Handlers */
860
861/* Callbacks */
862void
078086f3 863bnad_cb_mbox_intr_enable(struct bnad *bnad)
8b230ed8
RM
864{
865 bnad_enable_mbox_irq(bnad);
866}
867
868void
078086f3 869bnad_cb_mbox_intr_disable(struct bnad *bnad)
8b230ed8
RM
870{
871 bnad_disable_mbox_irq(bnad);
872}
873
874void
078086f3
RM
875bnad_cb_ioceth_ready(struct bnad *bnad)
876{
877 bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
878 complete(&bnad->bnad_completions.ioc_comp);
879}
880
881void
882bnad_cb_ioceth_failed(struct bnad *bnad)
8b230ed8 883{
078086f3 884 bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
8b230ed8 885 complete(&bnad->bnad_completions.ioc_comp);
8b230ed8
RM
886}
887
888void
078086f3 889bnad_cb_ioceth_disabled(struct bnad *bnad)
8b230ed8 890{
078086f3 891 bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
8b230ed8 892 complete(&bnad->bnad_completions.ioc_comp);
8b230ed8
RM
893}
894
895static void
078086f3 896bnad_cb_enet_disabled(void *arg)
8b230ed8
RM
897{
898 struct bnad *bnad = (struct bnad *)arg;
899
8b230ed8 900 netif_carrier_off(bnad->netdev);
078086f3 901 complete(&bnad->bnad_completions.enet_comp);
8b230ed8
RM
902}
903
904void
078086f3 905bnad_cb_ethport_link_status(struct bnad *bnad,
8b230ed8
RM
906 enum bna_link_status link_status)
907{
3db1cd5c 908 bool link_up = false;
8b230ed8
RM
909
910 link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
911
912 if (link_status == BNA_CEE_UP) {
078086f3
RM
913 if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
914 BNAD_UPDATE_CTR(bnad, cee_toggle);
8b230ed8 915 set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
078086f3
RM
916 } else {
917 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
918 BNAD_UPDATE_CTR(bnad, cee_toggle);
8b230ed8 919 clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
078086f3 920 }
8b230ed8
RM
921
922 if (link_up) {
923 if (!netif_carrier_ok(bnad->netdev)) {
078086f3
RM
924 uint tx_id, tcb_id;
925 printk(KERN_WARNING "bna: %s link up\n",
8b230ed8
RM
926 bnad->netdev->name);
927 netif_carrier_on(bnad->netdev);
928 BNAD_UPDATE_CTR(bnad, link_toggle);
078086f3
RM
929 for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
930 for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
931 tcb_id++) {
932 struct bna_tcb *tcb =
933 bnad->tx_info[tx_id].tcb[tcb_id];
934 u32 txq_id;
935 if (!tcb)
936 continue;
937
938 txq_id = tcb->id;
939
940 if (test_bit(BNAD_TXQ_TX_STARTED,
941 &tcb->flags)) {
942 /*
943 * Force an immediate
944 * Transmit Schedule */
945 printk(KERN_INFO "bna: %s %d "
946 "TXQ_STARTED\n",
947 bnad->netdev->name,
948 txq_id);
949 netif_wake_subqueue(
950 bnad->netdev,
951 txq_id);
952 BNAD_UPDATE_CTR(bnad,
953 netif_queue_wakeup);
954 } else {
955 netif_stop_subqueue(
956 bnad->netdev,
957 txq_id);
958 BNAD_UPDATE_CTR(bnad,
959 netif_queue_stop);
960 }
961 }
8b230ed8
RM
962 }
963 }
964 } else {
965 if (netif_carrier_ok(bnad->netdev)) {
078086f3 966 printk(KERN_WARNING "bna: %s link down\n",
8b230ed8
RM
967 bnad->netdev->name);
968 netif_carrier_off(bnad->netdev);
969 BNAD_UPDATE_CTR(bnad, link_toggle);
970 }
971 }
972}
973
974static void
078086f3 975bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
8b230ed8
RM
976{
977 struct bnad *bnad = (struct bnad *)arg;
978
979 complete(&bnad->bnad_completions.tx_comp);
980}
981
982static void
983bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
984{
985 struct bnad_tx_info *tx_info =
986 (struct bnad_tx_info *)tcb->txq->tx->priv;
8b230ed8 987
5216562a 988 tcb->priv = tcb;
8b230ed8 989 tx_info->tcb[tcb->id] = tcb;
8b230ed8
RM
990}
991
992static void
993bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
994{
995 struct bnad_tx_info *tx_info =
996 (struct bnad_tx_info *)tcb->txq->tx->priv;
997
998 tx_info->tcb[tcb->id] = NULL;
01b54b14 999 tcb->priv = NULL;
8b230ed8
RM
1000}
1001
8b230ed8
RM
1002static void
1003bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
1004{
1005 struct bnad_rx_info *rx_info =
1006 (struct bnad_rx_info *)ccb->cq->rx->priv;
1007
1008 rx_info->rx_ctrl[ccb->id].ccb = ccb;
1009 ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
1010}
1011
1012static void
1013bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
1014{
1015 struct bnad_rx_info *rx_info =
1016 (struct bnad_rx_info *)ccb->cq->rx->priv;
1017
1018 rx_info->rx_ctrl[ccb->id].ccb = NULL;
1019}
1020
1021static void
078086f3 1022bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
8b230ed8
RM
1023{
1024 struct bnad_tx_info *tx_info =
078086f3
RM
1025 (struct bnad_tx_info *)tx->priv;
1026 struct bna_tcb *tcb;
1027 u32 txq_id;
1028 int i;
8b230ed8 1029
078086f3
RM
1030 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1031 tcb = tx_info->tcb[i];
1032 if (!tcb)
1033 continue;
1034 txq_id = tcb->id;
1035 clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
1036 netif_stop_subqueue(bnad->netdev, txq_id);
1037 printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n",
1038 bnad->netdev->name, txq_id);
1039 }
8b230ed8
RM
1040}
1041
1042static void
078086f3 1043bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
8b230ed8 1044{
078086f3
RM
1045 struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
1046 struct bna_tcb *tcb;
078086f3
RM
1047 u32 txq_id;
1048 int i;
8b230ed8 1049
078086f3
RM
1050 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1051 tcb = tx_info->tcb[i];
1052 if (!tcb)
1053 continue;
1054 txq_id = tcb->id;
8b230ed8 1055
01b54b14 1056 BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
078086f3 1057 set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
01b54b14 1058 BUG_ON(*(tcb->hw_consumer_index) != 0);
078086f3
RM
1059
1060 if (netif_carrier_ok(bnad->netdev)) {
1061 printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
1062 bnad->netdev->name, txq_id);
1063 netif_wake_subqueue(bnad->netdev, txq_id);
1064 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
1065 }
1066 }
be7fa326
RM
1067
1068 /*
078086f3 1069 * Workaround for first ioceth enable failure & we
be7fa326
RM
1070 * get a 0 MAC address. We try to get the MAC address
1071 * again here.
1072 */
1073 if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
078086f3 1074 bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr);
be7fa326
RM
1075 bnad_set_netdev_perm_addr(bnad);
1076 }
be7fa326
RM
1077}
1078
01b54b14
JH
1079/*
1080 * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
1081 */
1082static void
1083bnad_tx_cleanup(struct delayed_work *work)
1084{
1085 struct bnad_tx_info *tx_info =
1086 container_of(work, struct bnad_tx_info, tx_cleanup_work);
1087 struct bnad *bnad = NULL;
01b54b14
JH
1088 struct bna_tcb *tcb;
1089 unsigned long flags;
5216562a 1090 u32 i, pending = 0;
01b54b14
JH
1091
1092 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1093 tcb = tx_info->tcb[i];
1094 if (!tcb)
1095 continue;
1096
1097 bnad = tcb->bnad;
1098
1099 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
1100 pending++;
1101 continue;
1102 }
1103
b3cc6e88 1104 bnad_txq_cleanup(bnad, tcb);
01b54b14 1105
01b54b14
JH
1106 smp_mb__before_clear_bit();
1107 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
1108 }
1109
1110 if (pending) {
1111 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
1112 msecs_to_jiffies(1));
1113 return;
1114 }
1115
1116 spin_lock_irqsave(&bnad->bna_lock, flags);
1117 bna_tx_cleanup_complete(tx_info->tx);
1118 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1119}
1120
be7fa326 1121static void
078086f3 1122bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
be7fa326 1123{
078086f3
RM
1124 struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
1125 struct bna_tcb *tcb;
1126 int i;
1127
1128 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1129 tcb = tx_info->tcb[i];
1130 if (!tcb)
1131 continue;
1132 }
1133
01b54b14 1134 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
8b230ed8
RM
1135}
1136
5bcf6ac0
RM
1137static void
1138bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
1139{
1140 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1141 struct bna_ccb *ccb;
1142 struct bnad_rx_ctrl *rx_ctrl;
1143 int i;
1144
1145 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1146 rx_ctrl = &rx_info->rx_ctrl[i];
1147 ccb = rx_ctrl->ccb;
1148 if (!ccb)
1149 continue;
1150
1151 clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);
1152
1153 if (ccb->rcb[1])
1154 clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
1155 }
1156}
1157
01b54b14
JH
1158/*
1159 * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
1160 */
1161static void
1162bnad_rx_cleanup(void *work)
1163{
1164 struct bnad_rx_info *rx_info =
1165 container_of(work, struct bnad_rx_info, rx_cleanup_work);
1166 struct bnad_rx_ctrl *rx_ctrl;
1167 struct bnad *bnad = NULL;
1168 unsigned long flags;
5216562a 1169 u32 i;
01b54b14
JH
1170
1171 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1172 rx_ctrl = &rx_info->rx_ctrl[i];
1173
1174 if (!rx_ctrl->ccb)
1175 continue;
1176
1177 bnad = rx_ctrl->ccb->bnad;
1178
1179 /*
1180 * Wait till the poll handler has exited
1181 * and nothing can be scheduled anymore
1182 */
1183 napi_disable(&rx_ctrl->napi);
1184
b3cc6e88
JH
1185 bnad_cq_cleanup(bnad, rx_ctrl->ccb);
1186 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
01b54b14 1187 if (rx_ctrl->ccb->rcb[1])
b3cc6e88 1188 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
01b54b14
JH
1189 }
1190
1191 spin_lock_irqsave(&bnad->bna_lock, flags);
1192 bna_rx_cleanup_complete(rx_info->rx);
1193 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1194}
1195
8b230ed8 1196static void
078086f3 1197bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
8b230ed8 1198{
078086f3
RM
1199 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1200 struct bna_ccb *ccb;
1201 struct bnad_rx_ctrl *rx_ctrl;
1202 int i;
1203
772b5235 1204 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
078086f3
RM
1205 rx_ctrl = &rx_info->rx_ctrl[i];
1206 ccb = rx_ctrl->ccb;
1207 if (!ccb)
1208 continue;
1209
1210 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
1211
1212 if (ccb->rcb[1])
1213 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
078086f3 1214 }
be7fa326 1215
01b54b14 1216 queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
8b230ed8
RM
1217}
1218
1219static void
078086f3 1220bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
8b230ed8 1221{
078086f3
RM
1222 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1223 struct bna_ccb *ccb;
1224 struct bna_rcb *rcb;
1225 struct bnad_rx_ctrl *rx_ctrl;
30f9fc94 1226 int i, j;
be7fa326 1227
772b5235 1228 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
078086f3
RM
1229 rx_ctrl = &rx_info->rx_ctrl[i];
1230 ccb = rx_ctrl->ccb;
1231 if (!ccb)
1232 continue;
be7fa326 1233
01b54b14 1234 napi_enable(&rx_ctrl->napi);
8b230ed8 1235
078086f3
RM
1236 for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
1237 rcb = ccb->rcb[j];
1238 if (!rcb)
1239 continue;
078086f3 1240
30f9fc94 1241 bnad_rxq_alloc_init(bnad, rcb);
078086f3 1242 set_bit(BNAD_RXQ_STARTED, &rcb->flags);
5bcf6ac0 1243 set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
5216562a 1244 bnad_rxq_post(bnad, rcb);
078086f3 1245 }
8b230ed8
RM
1246 }
1247}
1248
1249static void
078086f3 1250bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
8b230ed8
RM
1251{
1252 struct bnad *bnad = (struct bnad *)arg;
1253
1254 complete(&bnad->bnad_completions.rx_comp);
1255}
1256
1257static void
078086f3 1258bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
8b230ed8 1259{
078086f3 1260 bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
8b230ed8
RM
1261 complete(&bnad->bnad_completions.mcast_comp);
1262}
1263
1264void
1265bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
1266 struct bna_stats *stats)
1267{
1268 if (status == BNA_CB_SUCCESS)
1269 BNAD_UPDATE_CTR(bnad, hw_stats_updates);
1270
1271 if (!netif_running(bnad->netdev) ||
1272 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1273 return;
1274
1275 mod_timer(&bnad->stats_timer,
1276 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1277}
1278
078086f3
RM
1279static void
1280bnad_cb_enet_mtu_set(struct bnad *bnad)
1281{
1282 bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
1283 complete(&bnad->bnad_completions.mtu_comp);
1284}
1285
72a9730b
KG
1286void
1287bnad_cb_completion(void *arg, enum bfa_status status)
1288{
1289 struct bnad_iocmd_comp *iocmd_comp =
1290 (struct bnad_iocmd_comp *)arg;
1291
1292 iocmd_comp->comp_status = (u32) status;
1293 complete(&iocmd_comp->comp);
1294}
1295
8b230ed8
RM
1296/* Resource allocation, free functions */
1297
1298static void
1299bnad_mem_free(struct bnad *bnad,
1300 struct bna_mem_info *mem_info)
1301{
1302 int i;
1303 dma_addr_t dma_pa;
1304
1305 if (mem_info->mdl == NULL)
1306 return;
1307
1308 for (i = 0; i < mem_info->num; i++) {
1309 if (mem_info->mdl[i].kva != NULL) {
1310 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1311 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
1312 dma_pa);
5ea74318
IV
1313 dma_free_coherent(&bnad->pcidev->dev,
1314 mem_info->mdl[i].len,
1315 mem_info->mdl[i].kva, dma_pa);
8b230ed8
RM
1316 } else
1317 kfree(mem_info->mdl[i].kva);
1318 }
1319 }
1320 kfree(mem_info->mdl);
1321 mem_info->mdl = NULL;
1322}
1323
1324static int
1325bnad_mem_alloc(struct bnad *bnad,
1326 struct bna_mem_info *mem_info)
1327{
1328 int i;
1329 dma_addr_t dma_pa;
1330
1331 if ((mem_info->num == 0) || (mem_info->len == 0)) {
1332 mem_info->mdl = NULL;
1333 return 0;
1334 }
1335
1336 mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1337 GFP_KERNEL);
1338 if (mem_info->mdl == NULL)
1339 return -ENOMEM;
1340
1341 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1342 for (i = 0; i < mem_info->num; i++) {
1343 mem_info->mdl[i].len = mem_info->len;
1344 mem_info->mdl[i].kva =
5ea74318 1345 dma_alloc_coherent(&bnad->pcidev->dev,
1f9061d2
JP
1346 mem_info->len, &dma_pa,
1347 GFP_KERNEL);
8b230ed8
RM
1348 if (mem_info->mdl[i].kva == NULL)
1349 goto err_return;
1350
1351 BNA_SET_DMA_ADDR(dma_pa,
1352 &(mem_info->mdl[i].dma));
1353 }
1354 } else {
1355 for (i = 0; i < mem_info->num; i++) {
1356 mem_info->mdl[i].len = mem_info->len;
1357 mem_info->mdl[i].kva = kzalloc(mem_info->len,
1358 GFP_KERNEL);
1359 if (mem_info->mdl[i].kva == NULL)
1360 goto err_return;
1361 }
1362 }
1363
1364 return 0;
1365
1366err_return:
1367 bnad_mem_free(bnad, mem_info);
1368 return -ENOMEM;
1369}
1370
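/*
 * Summary of the per-descriptor pairing used by bnad_mem_alloc() above and
 * bnad_mem_free() before it (illustrative):
 *
 *	BNA_MEM_T_DMA -> dma_alloc_coherent(&pcidev->dev, len, &dma_pa, GFP_KERNEL),
 *	                 released with dma_free_coherent()
 *	otherwise     -> kzalloc(len, GFP_KERNEL), released with kfree()
 */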
1371/* Free IRQ for Mailbox */
1372static void
078086f3 1373bnad_mbox_irq_free(struct bnad *bnad)
8b230ed8
RM
1374{
1375 int irq;
1376 unsigned long flags;
1377
8b230ed8 1378 spin_lock_irqsave(&bnad->bna_lock, flags);
8b230ed8 1379 bnad_disable_mbox_irq(bnad);
e2fa6f2e 1380 spin_unlock_irqrestore(&bnad->bna_lock, flags);
8b230ed8
RM
1381
1382 irq = BNAD_GET_MBOX_IRQ(bnad);
be7fa326 1383 free_irq(irq, bnad);
8b230ed8
RM
1384}
1385
1386/*
1387 * Allocates IRQ for Mailbox, but keeps it disabled
1388 * This will be enabled once we get the mbox enable callback
1389 * from bna
1390 */
1391static int
078086f3 1392bnad_mbox_irq_alloc(struct bnad *bnad)
8b230ed8 1393{
0120b99c
RM
1394 int err = 0;
1395 unsigned long irq_flags, flags;
8b230ed8 1396 u32 irq;
0120b99c 1397 irq_handler_t irq_handler;
8b230ed8 1398
8b230ed8
RM
1399 spin_lock_irqsave(&bnad->bna_lock, flags);
1400 if (bnad->cfg_flags & BNAD_CF_MSIX) {
1401 irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
8811e267 1402 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
8279171a 1403 irq_flags = 0;
8b230ed8
RM
1404 } else {
1405 irq_handler = (irq_handler_t)bnad_isr;
1406 irq = bnad->pcidev->irq;
5f77898d 1407 irq_flags = IRQF_SHARED;
8b230ed8 1408 }
8811e267 1409
8b230ed8 1410 spin_unlock_irqrestore(&bnad->bna_lock, flags);
8b230ed8
RM
1411 sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1412
e2fa6f2e
RM
1413 /*
1414 * Set the Mbox IRQ disable flag, so that the IRQ handler
1415 * called from request_irq() for SHARED IRQs does not execute
1416 */
1417 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1418
be7fa326
RM
1419 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1420
8279171a 1421 err = request_irq(irq, irq_handler, irq_flags,
be7fa326 1422 bnad->mbox_irq_name, bnad);
e2fa6f2e 1423
be7fa326 1424 return err;
8b230ed8
RM
1425}
1426
1427static void
1428bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1429{
1430 kfree(intr_info->idl);
1431 intr_info->idl = NULL;
1432}
1433
1434/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1435static int
1436bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
078086f3 1437 u32 txrx_id, struct bna_intr_info *intr_info)
8b230ed8
RM
1438{
1439 int i, vector_start = 0;
1440 u32 cfg_flags;
1441 unsigned long flags;
1442
1443 spin_lock_irqsave(&bnad->bna_lock, flags);
1444 cfg_flags = bnad->cfg_flags;
1445 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1446
1447 if (cfg_flags & BNAD_CF_MSIX) {
1448 intr_info->intr_type = BNA_INTR_T_MSIX;
1449 intr_info->idl = kcalloc(intr_info->num,
1450 sizeof(struct bna_intr_descr),
1451 GFP_KERNEL);
1452 if (!intr_info->idl)
1453 return -ENOMEM;
1454
1455 switch (src) {
1456 case BNAD_INTR_TX:
8811e267 1457 vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
8b230ed8
RM
1458 break;
1459
1460 case BNAD_INTR_RX:
8811e267
RM
1461 vector_start = BNAD_MAILBOX_MSIX_VECTORS +
1462 (bnad->num_tx * bnad->num_txq_per_tx) +
8b230ed8
RM
1463 txrx_id;
1464 break;
1465
1466 default:
1467 BUG();
1468 }
1469
1470 for (i = 0; i < intr_info->num; i++)
1471 intr_info->idl[i].vector = vector_start + i;
1472 } else {
1473 intr_info->intr_type = BNA_INTR_T_INTX;
1474 intr_info->num = 1;
1475 intr_info->idl = kcalloc(intr_info->num,
1476 sizeof(struct bna_intr_descr),
1477 GFP_KERNEL);
1478 if (!intr_info->idl)
1479 return -ENOMEM;
1480
1481 switch (src) {
1482 case BNAD_INTR_TX:
8811e267 1483 intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
8b230ed8
RM
1484 break;
1485
1486 case BNAD_INTR_RX:
8811e267 1487 intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
8b230ed8
RM
1488 break;
1489 }
1490 }
1491 return 0;
1492}
1493
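/*
 * MSI-X vector layout implied by the offsets above: mailbox vector(s) first,
 * then one vector per TxQ across all Tx objects, then one per Rx path.  With
 * hypothetical counts of one mailbox vector and two Tx objects of four TxQs
 * each, the first Rx object (txrx_id = 0) would start at:
 *
 *	vector_start = 1 + (2 * 4) + 0 = 9
 *
 * (BNAD_MAILBOX_MSIX_VECTORS assumed to be 1 for this example.)
 */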
1aa8b471 1494/* NOTE: Should be called for MSIX only
8b230ed8
RM
1495 * Unregisters Tx MSIX vector(s) from the kernel
1496 */
1497static void
1498bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1499 int num_txqs)
1500{
1501 int i;
1502 int vector_num;
1503
1504 for (i = 0; i < num_txqs; i++) {
1505 if (tx_info->tcb[i] == NULL)
1506 continue;
1507
1508 vector_num = tx_info->tcb[i]->intr_vector;
1509 free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1510 }
1511}
1512
1aa8b471 1513/* NOTE: Should be called for MSIX only
8b230ed8
RM
1514 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1515 */
1516static int
1517bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
078086f3 1518 u32 tx_id, int num_txqs)
8b230ed8
RM
1519{
1520 int i;
1521 int err;
1522 int vector_num;
1523
1524 for (i = 0; i < num_txqs; i++) {
1525 vector_num = tx_info->tcb[i]->intr_vector;
1526 sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1527 tx_id + tx_info->tcb[i]->id);
1528 err = request_irq(bnad->msix_table[vector_num].vector,
1529 (irq_handler_t)bnad_msix_tx, 0,
1530 tx_info->tcb[i]->name,
1531 tx_info->tcb[i]);
1532 if (err)
1533 goto err_return;
1534 }
1535
1536 return 0;
1537
1538err_return:
1539 if (i > 0)
1540 bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
1541 return -1;
1542}
1543
1aa8b471 1544/* NOTE: Should be called for MSIX only
8b230ed8
RM
1545 * Unregisters Rx MSIX vector(s) from the kernel
1546 */
1547static void
1548bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1549 int num_rxps)
1550{
1551 int i;
1552 int vector_num;
1553
1554 for (i = 0; i < num_rxps; i++) {
1555 if (rx_info->rx_ctrl[i].ccb == NULL)
1556 continue;
1557
1558 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1559 free_irq(bnad->msix_table[vector_num].vector,
1560 rx_info->rx_ctrl[i].ccb);
1561 }
1562}
1563
1aa8b471 1564/* NOTE: Should be called for MSIX only
8b230ed8
RM
1565 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
1566 */
1567static int
1568bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
078086f3 1569 u32 rx_id, int num_rxps)
8b230ed8
RM
1570{
1571 int i;
1572 int err;
1573 int vector_num;
1574
1575 for (i = 0; i < num_rxps; i++) {
1576 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1577 sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1578 bnad->netdev->name,
1579 rx_id + rx_info->rx_ctrl[i].ccb->id);
1580 err = request_irq(bnad->msix_table[vector_num].vector,
1581 (irq_handler_t)bnad_msix_rx, 0,
1582 rx_info->rx_ctrl[i].ccb->name,
1583 rx_info->rx_ctrl[i].ccb);
1584 if (err)
1585 goto err_return;
1586 }
1587
1588 return 0;
1589
1590err_return:
1591 if (i > 0)
1592 bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
1593 return -1;
1594}
1595
1596/* Free Tx object Resources */
1597static void
1598bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1599{
1600 int i;
1601
1602 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1603 if (res_info[i].res_type == BNA_RES_T_MEM)
1604 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1605 else if (res_info[i].res_type == BNA_RES_T_INTR)
1606 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1607 }
1608}
1609
1610/* Allocates memory and interrupt resources for Tx object */
1611static int
1612bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
078086f3 1613 u32 tx_id)
8b230ed8
RM
1614{
1615 int i, err = 0;
1616
1617 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1618 if (res_info[i].res_type == BNA_RES_T_MEM)
1619 err = bnad_mem_alloc(bnad,
1620 &res_info[i].res_u.mem_info);
1621 else if (res_info[i].res_type == BNA_RES_T_INTR)
1622 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1623 &res_info[i].res_u.intr_info);
1624 if (err)
1625 goto err_return;
1626 }
1627 return 0;
1628
1629err_return:
1630 bnad_tx_res_free(bnad, res_info);
1631 return err;
1632}
1633
1634/* Free Rx object Resources */
1635static void
1636bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1637{
1638 int i;
1639
1640 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1641 if (res_info[i].res_type == BNA_RES_T_MEM)
1642 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1643 else if (res_info[i].res_type == BNA_RES_T_INTR)
1644 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1645 }
1646}
1647
1648/* Allocates memory and interrupt resources for Rx object */
1649static int
1650bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1651 uint rx_id)
1652{
1653 int i, err = 0;
1654
1655 /* All memory needs to be allocated before setup_ccbs */
1656 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1657 if (res_info[i].res_type == BNA_RES_T_MEM)
1658 err = bnad_mem_alloc(bnad,
1659 &res_info[i].res_u.mem_info);
1660 else if (res_info[i].res_type == BNA_RES_T_INTR)
1661 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1662 &res_info[i].res_u.intr_info);
1663 if (err)
1664 goto err_return;
1665 }
1666 return 0;
1667
1668err_return:
1669 bnad_rx_res_free(bnad, res_info);
1670 return err;
1671}
1672
1673/* Timer callbacks */
1674/* a) IOC timer */
1675static void
1676bnad_ioc_timeout(unsigned long data)
1677{
1678 struct bnad *bnad = (struct bnad *)data;
1679 unsigned long flags;
1680
1681 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3 1682 bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc);
8b230ed8
RM
1683 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1684}
1685
1686static void
1687bnad_ioc_hb_check(unsigned long data)
1688{
1689 struct bnad *bnad = (struct bnad *)data;
1690 unsigned long flags;
1691
1692 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3 1693 bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc);
8b230ed8
RM
1694 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1695}
1696
1697static void
1d32f769 1698bnad_iocpf_timeout(unsigned long data)
8b230ed8
RM
1699{
1700 struct bnad *bnad = (struct bnad *)data;
1701 unsigned long flags;
1702
1703 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3 1704 bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc);
1d32f769
RM
1705 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1706}
1707
1708static void
1709bnad_iocpf_sem_timeout(unsigned long data)
1710{
1711 struct bnad *bnad = (struct bnad *)data;
1712 unsigned long flags;
1713
1714 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3 1715 bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc);
8b230ed8
RM
1716 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1717}
1718
1719/*
1720 * All timer routines use bnad->bna_lock to protect against
1721 * the following race, which may occur in case of no locking:
0120b99c 1722 * Time	CPU m			CPU n
8b230ed8
RM
1723 * 0	1 = test_bit
1724 * 1				clear_bit
1725 * 2				del_timer_sync
1726 * 3	mod_timer
1727 */
1728
1729/* b) Dynamic Interrupt Moderation Timer */
1730static void
1731bnad_dim_timeout(unsigned long data)
1732{
1733 struct bnad *bnad = (struct bnad *)data;
1734 struct bnad_rx_info *rx_info;
1735 struct bnad_rx_ctrl *rx_ctrl;
1736 int i, j;
1737 unsigned long flags;
1738
1739 if (!netif_carrier_ok(bnad->netdev))
1740 return;
1741
1742 spin_lock_irqsave(&bnad->bna_lock, flags);
1743 for (i = 0; i < bnad->num_rx; i++) {
1744 rx_info = &bnad->rx_info[i];
1745 if (!rx_info->rx)
1746 continue;
1747 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1748 rx_ctrl = &rx_info->rx_ctrl[j];
1749 if (!rx_ctrl->ccb)
1750 continue;
1751 bna_rx_dim_update(rx_ctrl->ccb);
1752 }
1753 }
1754
1755 /* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
1756 if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1757 mod_timer(&bnad->dim_timer,
1758 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1759 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1760}
1761
1762/* c) Statistics Timer */
1763static void
1764bnad_stats_timeout(unsigned long data)
1765{
1766 struct bnad *bnad = (struct bnad *)data;
1767 unsigned long flags;
1768
1769 if (!netif_running(bnad->netdev) ||
1770 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1771 return;
1772
1773 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3 1774 bna_hw_stats_get(&bnad->bna);
8b230ed8
RM
1775 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1776}
1777
1778/*
1779 * Set up timer for DIM
1780 * Called with bnad->bna_lock held
1781 */
1782void
1783bnad_dim_timer_start(struct bnad *bnad)
1784{
1785 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1786 !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1787 setup_timer(&bnad->dim_timer, bnad_dim_timeout,
1788 (unsigned long)bnad);
1789 set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1790 mod_timer(&bnad->dim_timer,
1791 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1792 }
1793}
1794
1795/*
1796 * Set up timer for statistics
1797 * Called with mutex_lock(&bnad->conf_mutex) held
1798 */
1799static void
1800bnad_stats_timer_start(struct bnad *bnad)
1801{
1802 unsigned long flags;
1803
1804 spin_lock_irqsave(&bnad->bna_lock, flags);
1805 if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1806 setup_timer(&bnad->stats_timer, bnad_stats_timeout,
1807 (unsigned long)bnad);
1808 mod_timer(&bnad->stats_timer,
1809 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1810 }
1811 spin_unlock_irqrestore(&bnad->bna_lock, flags);
8b230ed8
RM
1812}
1813
1814/*
1815 * Stops the stats timer
1816 * Called with mutex_lock(&bnad->conf_mutex) held
1817 */
1818static void
1819bnad_stats_timer_stop(struct bnad *bnad)
1820{
1821 int to_del = 0;
1822 unsigned long flags;
1823
1824 spin_lock_irqsave(&bnad->bna_lock, flags);
1825 if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1826 to_del = 1;
1827 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1828 if (to_del)
1829 del_timer_sync(&bnad->stats_timer);
1830}
1831
1832/* Utilities */
1833
1834static void
1835bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1836{
1837 int i = 1; /* Index 0 has broadcast address */
1838 struct netdev_hw_addr *mc_addr;
1839
1840 netdev_for_each_mc_addr(mc_addr, netdev) {
1841 memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
1842 ETH_ALEN);
1843 i++;
1844 }
1845}
1846
1847static int
1848bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1849{
1850 struct bnad_rx_ctrl *rx_ctrl =
1851 container_of(napi, struct bnad_rx_ctrl, napi);
2be67144 1852 struct bnad *bnad = rx_ctrl->bnad;
8b230ed8
RM
1853 int rcvd = 0;
1854
271e8b79 1855 rx_ctrl->rx_poll_ctr++;
8b230ed8
RM
1856
1857 if (!netif_carrier_ok(bnad->netdev))
1858 goto poll_exit;
1859
b3cc6e88 1860 rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
271e8b79 1861 if (rcvd >= budget)
8b230ed8
RM
1862 return rcvd;
1863
1864poll_exit:
19dbff9f 1865 napi_complete(napi);
8b230ed8 1866
271e8b79 1867 rx_ctrl->rx_complete++;
2be67144
RM
1868
1869 if (rx_ctrl->ccb)
271e8b79
RM
1870 bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
1871
8b230ed8
RM
1872 return rcvd;
1873}
1874
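/*
 * bnad_napi_poll_rx() above follows the standard NAPI contract: consuming the
 * whole budget means more work may remain, so it returns without completing
 * and the core keeps polling; anything less ends the poll with napi_complete()
 * and re-arms the completion-queue interrupt.  A generic skeleton of that
 * contract (toy_* helpers are hypothetical, not driver code):
 */
static int toy_process_rx(struct napi_struct *napi, int budget);	/* hypothetical */
static void toy_enable_rx_irq(struct napi_struct *napi);		/* hypothetical */

static int toy_poll(struct napi_struct *napi, int budget)
{
	int done = toy_process_rx(napi, budget);

	if (done < budget) {
		napi_complete(napi);
		toy_enable_rx_irq(napi);
	}
	return done;
}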
2be67144 1875#define BNAD_NAPI_POLL_QUOTA 64
8b230ed8 1876static void
01b54b14 1877bnad_napi_add(struct bnad *bnad, u32 rx_id)
8b230ed8 1878{
8b230ed8
RM
1879 struct bnad_rx_ctrl *rx_ctrl;
1880 int i;
8b230ed8
RM
1881
1882 /* Initialize & enable NAPI */
1883 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1884 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1885 netif_napi_add(bnad->netdev, &rx_ctrl->napi,
2be67144
RM
1886 bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
1887 }
1888}
1889
1890static void
01b54b14 1891bnad_napi_delete(struct bnad *bnad, u32 rx_id)
8b230ed8
RM
1892{
1893 int i;
1894
1895 /* First disable and then clean up */
01b54b14 1896 for (i = 0; i < bnad->num_rxp_per_rx; i++)
8b230ed8 1897 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
8b230ed8
RM
1898}
1899
1900/* Should be called with conf_lock held */
1901void
b3cc6e88 1902bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
8b230ed8
RM
1903{
1904 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1905 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1906 unsigned long flags;
1907
1908 if (!tx_info->tx)
1909 return;
1910
1911 init_completion(&bnad->bnad_completions.tx_comp);
1912 spin_lock_irqsave(&bnad->bna_lock, flags);
1913 bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1914 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1915 wait_for_completion(&bnad->bnad_completions.tx_comp);
1916
1917 if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1918 bnad_tx_msix_unregister(bnad, tx_info,
1919 bnad->num_txq_per_tx);
1920
1921 spin_lock_irqsave(&bnad->bna_lock, flags);
1922 bna_tx_destroy(tx_info->tx);
1923 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1924
1925 tx_info->tx = NULL;
078086f3 1926 tx_info->tx_id = 0;
8b230ed8 1927
8b230ed8
RM
1928 bnad_tx_res_free(bnad, res_info);
1929}
1930
1931/* Should be called with conf_lock held */
1932int
078086f3 1933bnad_setup_tx(struct bnad *bnad, u32 tx_id)
8b230ed8
RM
1934{
1935 int err;
1936 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1937 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1938 struct bna_intr_info *intr_info =
1939 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1940 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
d91d25d5 1941 static const struct bna_tx_event_cbfn tx_cbfn = {
1942 .tcb_setup_cbfn = bnad_cb_tcb_setup,
1943 .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
1944 .tx_stall_cbfn = bnad_cb_tx_stall,
1945 .tx_resume_cbfn = bnad_cb_tx_resume,
1946 .tx_cleanup_cbfn = bnad_cb_tx_cleanup,
1947 };
1948
8b230ed8
RM
1949 struct bna_tx *tx;
1950 unsigned long flags;
1951
078086f3
RM
1952 tx_info->tx_id = tx_id;
1953
8b230ed8
RM
1954 /* Initialize the Tx object configuration */
1955 tx_config->num_txq = bnad->num_txq_per_tx;
1956 tx_config->txq_depth = bnad->txq_depth;
1957 tx_config->tx_type = BNA_TX_T_REGULAR;
078086f3 1958 tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
8b230ed8 1959
8b230ed8
RM
1960 /* Get BNA's resource requirement for one tx object */
1961 spin_lock_irqsave(&bnad->bna_lock, flags);
1962 bna_tx_res_req(bnad->num_txq_per_tx,
1963 bnad->txq_depth, res_info);
1964 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1965
1966 /* Fill Unmap Q memory requirements */
5216562a
RM
1967 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1968 bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) *
1969 bnad->txq_depth));
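 /* One unmap array per TxQ: txq_depth entries of struct bnad_tx_unmap,
  * each tracking the skb and DMA addresses for one TxQ slot.
  */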
8b230ed8
RM
1970
1971 /* Allocate resources */
1972 err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1973 if (err)
1974 return err;
1975
1976 /* Ask BNA to create one Tx object, supplying required resources */
1977 spin_lock_irqsave(&bnad->bna_lock, flags);
1978 tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1979 tx_info);
1980 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1981 if (!tx)
1982 goto err_return;
1983 tx_info->tx = tx;
1984
01b54b14
JH
1985 INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
1986 (work_func_t)bnad_tx_cleanup);
1987
8b230ed8
RM
1988 /* Register ISR for the Tx object */
1989 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1990 err = bnad_tx_msix_register(bnad, tx_info,
1991 tx_id, bnad->num_txq_per_tx);
1992 if (err)
1993 goto err_return;
1994 }
1995
1996 spin_lock_irqsave(&bnad->bna_lock, flags);
1997 bna_tx_enable(tx);
1998 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1999
2000 return 0;
2001
2002err_return:
2003 bnad_tx_res_free(bnad, res_info);
2004 return err;
2005}
2006
2007/* Setup the rx config for bna_rx_create */
2008/* bnad decides the configuration */
2009static void
2010bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
2011{
e29aa339 2012 memset(rx_config, 0, sizeof(*rx_config));
8b230ed8
RM
2013 rx_config->rx_type = BNA_RX_T_REGULAR;
2014 rx_config->num_paths = bnad->num_rxp_per_rx;
078086f3 2015 rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
8b230ed8
RM
2016
2017 if (bnad->num_rxp_per_rx > 1) {
2018 rx_config->rss_status = BNA_STATUS_T_ENABLED;
2019 rx_config->rss_config.hash_type =
078086f3
RM
2020 (BFI_ENET_RSS_IPV6 |
2021 BFI_ENET_RSS_IPV6_TCP |
2022 BFI_ENET_RSS_IPV4 |
2023 BFI_ENET_RSS_IPV4_TCP);
8b230ed8
RM
2024 rx_config->rss_config.hash_mask =
2025 bnad->num_rxp_per_rx - 1;
2026 get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
2027 sizeof(rx_config->rss_config.toeplitz_hash_key));
2028 } else {
2029 rx_config->rss_status = BNA_STATUS_T_DISABLED;
2030 memset(&rx_config->rss_config, 0,
2031 sizeof(rx_config->rss_config));
2032 }
e29aa339
RM
 2032 }
 2033
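 /* Note: using num_rxp_per_rx - 1 as hash_mask assumes num_rxp_per_rx
  * is a power of two; the Toeplitz key is re-randomized each time the
  * Rx is (re)created.
  */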
2034 rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu);
2035 rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED;
2036
2037 /* BNA_RXP_SINGLE - one data-buffer queue
 2038 * BNA_RXP_SLR - one small-buffer and one large-buffer queue
 2039 * BNA_RXP_HDS - one header-buffer and one data-buffer queue
2040 */
2041 /* TODO: configurable param for queue type */
8b230ed8 2042 rx_config->rxp_type = BNA_RXP_SLR;
8b230ed8 2043
e29aa339
RM
2044 if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
2045 rx_config->frame_size > 4096) {
 2046 /* Though size_routing_enable is set in SLR mode,
 2047 * small packets may still get routed to the same rxq;
 2048 * set buf_size to 2048 instead of PAGE_SIZE.
2049 */
2050 rx_config->q0_buf_size = 2048;
2051 /* this should be in multiples of 2 */
2052 rx_config->q0_num_vecs = 4;
2053 rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs;
2054 rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED;
2055 } else {
2056 rx_config->q0_buf_size = rx_config->frame_size;
2057 rx_config->q0_num_vecs = 1;
2058 rx_config->q0_depth = bnad->rxq_depth;
2059 }
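 /* Large (>4K) frames on CAT2 adapters use multi-buffer receive: each
  * frame is scattered across up to q0_num_vecs 2048-byte buffers, and
  * q0_depth is scaled up to keep the same number of frames outstanding.
  * Smaller frames use a single buffer sized to the frame.
  */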
2060
2061 /* initialize for q1 for BNA_RXP_SLR/BNA_RXP_HDS */
2062 if (rx_config->rxp_type == BNA_RXP_SLR) {
2063 rx_config->q1_depth = bnad->rxq_depth;
2064 rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE;
2065 }
8b230ed8
RM
2066
2067 rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
2068}
2069
2be67144
RM
2070static void
2071bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
2072{
2073 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2074 int i;
2075
2076 for (i = 0; i < bnad->num_rxp_per_rx; i++)
2077 rx_info->rx_ctrl[i].bnad = bnad;
2078}
2079
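/* bnad_reinit_rx() tears down every existing Rx object, re-programs the
 * enet MTU, re-creates each Rx, and restores VLANs, the default bcast
 * entry, the unicast MAC and the Rx mode on the default Rx. Used when
 * an MTU change crosses the multi-buffer (4K frame) boundary.
 * Returns the number of Rx objects it attempted to set up.
 */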
8b230ed8 2080/* Called with mutex_lock(&bnad->conf_mutex) held */
e29aa339
RM
2081u32
2082bnad_reinit_rx(struct bnad *bnad)
2083{
2084 struct net_device *netdev = bnad->netdev;
2085 u32 err = 0, current_err = 0;
2086 u32 rx_id = 0, count = 0;
2087 unsigned long flags;
2088
2089 /* destroy and create new rx objects */
2090 for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2091 if (!bnad->rx_info[rx_id].rx)
2092 continue;
2093 bnad_destroy_rx(bnad, rx_id);
2094 }
2095
2096 spin_lock_irqsave(&bnad->bna_lock, flags);
2097 bna_enet_mtu_set(&bnad->bna.enet,
2098 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2099 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2100
2101 for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2102 count++;
2103 current_err = bnad_setup_rx(bnad, rx_id);
2104 if (current_err && !err) {
2105 err = current_err;
2106 pr_err("RXQ:%u setup failed\n", rx_id);
2107 }
2108 }
2109
2110 /* restore rx configuration */
2111 if (bnad->rx_info[0].rx && !err) {
2112 bnad_restore_vlans(bnad, 0);
2113 bnad_enable_default_bcast(bnad);
2114 spin_lock_irqsave(&bnad->bna_lock, flags);
2115 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2116 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2117 bnad_set_rx_mode(netdev);
2118 }
2119
2120 return count;
2121}
2122
2123/* Called with bnad_conf_lock() held */
8b230ed8 2124void
b3cc6e88 2125bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
8b230ed8
RM
2126{
2127 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2128 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2129 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2130 unsigned long flags;
271e8b79 2131 int to_del = 0;
8b230ed8
RM
2132
2133 if (!rx_info->rx)
2134 return;
2135
2136 if (0 == rx_id) {
2137 spin_lock_irqsave(&bnad->bna_lock, flags);
271e8b79
RM
2138 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
2139 test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
8b230ed8 2140 clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
271e8b79
RM
2141 to_del = 1;
2142 }
8b230ed8 2143 spin_unlock_irqrestore(&bnad->bna_lock, flags);
271e8b79 2144 if (to_del)
8b230ed8
RM
2145 del_timer_sync(&bnad->dim_timer);
2146 }
2147
8b230ed8
RM
2148 init_completion(&bnad->bnad_completions.rx_comp);
2149 spin_lock_irqsave(&bnad->bna_lock, flags);
2150 bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
2151 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2152 wait_for_completion(&bnad->bnad_completions.rx_comp);
2153
2154 if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
2155 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
2156
01b54b14 2157 bnad_napi_delete(bnad, rx_id);
2be67144 2158
8b230ed8
RM
2159 spin_lock_irqsave(&bnad->bna_lock, flags);
2160 bna_rx_destroy(rx_info->rx);
8b230ed8
RM
2161
2162 rx_info->rx = NULL;
3caa1e95 2163 rx_info->rx_id = 0;
b9fa1fbf 2164 spin_unlock_irqrestore(&bnad->bna_lock, flags);
8b230ed8
RM
2165
2166 bnad_rx_res_free(bnad, res_info);
2167}
2168
2169/* Called with mutex_lock(&bnad->conf_mutex) held */
2170int
078086f3 2171bnad_setup_rx(struct bnad *bnad, u32 rx_id)
8b230ed8
RM
2172{
2173 int err;
2174 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2175 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2176 struct bna_intr_info *intr_info =
2177 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2178 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
d91d25d5 2179 static const struct bna_rx_event_cbfn rx_cbfn = {
5216562a 2180 .rcb_setup_cbfn = NULL,
01b54b14 2181 .rcb_destroy_cbfn = NULL,
d91d25d5 2182 .ccb_setup_cbfn = bnad_cb_ccb_setup,
2183 .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
5bcf6ac0 2184 .rx_stall_cbfn = bnad_cb_rx_stall,
d91d25d5 2185 .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
2186 .rx_post_cbfn = bnad_cb_rx_post,
2187 };
8b230ed8
RM
2188 struct bna_rx *rx;
2189 unsigned long flags;
2190
078086f3
RM
2191 rx_info->rx_id = rx_id;
2192
8b230ed8
RM
2193 /* Initialize the Rx object configuration */
2194 bnad_init_rx_config(bnad, rx_config);
2195
8b230ed8
RM
2196 /* Get BNA's resource requirement for one Rx object */
2197 spin_lock_irqsave(&bnad->bna_lock, flags);
2198 bna_rx_res_req(rx_config, res_info);
2199 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2200
2201 /* Fill Unmap Q memory requirements */
e29aa339
RM
2202 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ],
2203 rx_config->num_paths,
2204 (rx_config->q0_depth *
2205 sizeof(struct bnad_rx_unmap)) +
2206 sizeof(struct bnad_rx_unmap_q));
2207
2208 if (rx_config->rxp_type != BNA_RXP_SINGLE) {
2209 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ],
2210 rx_config->num_paths,
2211 (rx_config->q1_depth *
2212 sizeof(struct bnad_rx_unmap) +
2213 sizeof(struct bnad_rx_unmap_q)));
2214 }
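 /* Data queue (q0) unmap memory: q0_depth bnad_rx_unmap entries plus
  * one bnad_rx_unmap_q header per Rx path; for SLR/HDS an additional
  * region of the same layout covers the small/header queue (q1).
  */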
8b230ed8
RM
2215 /* Allocate resource */
2216 err = bnad_rx_res_alloc(bnad, res_info, rx_id);
2217 if (err)
2218 return err;
2219
2be67144
RM
2220 bnad_rx_ctrl_init(bnad, rx_id);
2221
8b230ed8
RM
2222 /* Ask BNA to create one Rx object, supplying required resources */
2223 spin_lock_irqsave(&bnad->bna_lock, flags);
2224 rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
2225 rx_info);
3caa1e95
RM
2226 if (!rx) {
2227 err = -ENOMEM;
b9fa1fbf 2228 spin_unlock_irqrestore(&bnad->bna_lock, flags);
8b230ed8 2229 goto err_return;
3caa1e95 2230 }
8b230ed8 2231 rx_info->rx = rx;
b9fa1fbf 2232 spin_unlock_irqrestore(&bnad->bna_lock, flags);
8b230ed8 2233
01b54b14
JH
2234 INIT_WORK(&rx_info->rx_cleanup_work,
2235 (work_func_t)(bnad_rx_cleanup));
2236
2be67144
RM
2237 /*
 2238 * Init NAPI, so that state is set to NAPI_STATE_SCHED and
 2239 * the IRQ handler cannot schedule NAPI at this point.
2240 */
01b54b14 2241 bnad_napi_add(bnad, rx_id);
2be67144 2242
8b230ed8
RM
2243 /* Register ISR for the Rx object */
2244 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2245 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
2246 rx_config->num_paths);
2247 if (err)
2248 goto err_return;
2249 }
2250
8b230ed8
RM
2251 spin_lock_irqsave(&bnad->bna_lock, flags);
2252 if (0 == rx_id) {
2253 /* Set up Dynamic Interrupt Moderation Vector */
2254 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
2255 bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
2256
2257 /* Enable VLAN filtering only on the default Rx */
2258 bna_rx_vlanfilter_enable(rx);
2259
2260 /* Start the DIM timer */
2261 bnad_dim_timer_start(bnad);
2262 }
2263
2264 bna_rx_enable(rx);
2265 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2266
2267 return 0;
2268
2269err_return:
b3cc6e88 2270 bnad_destroy_rx(bnad, rx_id);
8b230ed8
RM
2271 return err;
2272}
2273
2274/* Called with conf_lock & bnad->bna_lock held */
2275void
2276bnad_tx_coalescing_timeo_set(struct bnad *bnad)
2277{
2278 struct bnad_tx_info *tx_info;
2279
2280 tx_info = &bnad->tx_info[0];
2281 if (!tx_info->tx)
2282 return;
2283
2284 bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
2285}
2286
2287/* Called with conf_lock & bnad->bna_lock held */
2288void
2289bnad_rx_coalescing_timeo_set(struct bnad *bnad)
2290{
2291 struct bnad_rx_info *rx_info;
0120b99c 2292 int i;
8b230ed8
RM
2293
2294 for (i = 0; i < bnad->num_rx; i++) {
2295 rx_info = &bnad->rx_info[i];
2296 if (!rx_info->rx)
2297 continue;
2298 bna_rx_coalescing_timeo_set(rx_info->rx,
2299 bnad->rx_coalescing_timeo);
2300 }
2301}
2302
2303/*
2304 * Called with bnad->bna_lock held
2305 */
a2122d95 2306int
8b230ed8
RM
2307bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
2308{
2309 int ret;
2310
2311 if (!is_valid_ether_addr(mac_addr))
2312 return -EADDRNOTAVAIL;
2313
2314 /* If datapath is down, pretend everything went through */
2315 if (!bnad->rx_info[0].rx)
2316 return 0;
2317
2318 ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
2319 if (ret != BNA_CB_SUCCESS)
2320 return -EADDRNOTAVAIL;
2321
2322 return 0;
2323}
2324
2325/* Should be called with conf_lock held */
a2122d95 2326int
8b230ed8
RM
2327bnad_enable_default_bcast(struct bnad *bnad)
2328{
2329 struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2330 int ret;
2331 unsigned long flags;
2332
2333 init_completion(&bnad->bnad_completions.mcast_comp);
2334
2335 spin_lock_irqsave(&bnad->bna_lock, flags);
2336 ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
2337 bnad_cb_rx_mcast_add);
2338 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2339
2340 if (ret == BNA_CB_SUCCESS)
2341 wait_for_completion(&bnad->bnad_completions.mcast_comp);
2342 else
2343 return -ENODEV;
2344
2345 if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2346 return -ENODEV;
2347
2348 return 0;
2349}
2350
19dbff9f 2351/* Called with mutex_lock(&bnad->conf_mutex) held */
a2122d95 2352void
aad75b66
RM
2353bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2354{
f859d7cb 2355 u16 vid;
aad75b66
RM
2356 unsigned long flags;
2357
f859d7cb 2358 for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
aad75b66 2359 spin_lock_irqsave(&bnad->bna_lock, flags);
f859d7cb 2360 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
aad75b66
RM
2361 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2362 }
2363}
2364
8b230ed8
RM
2365/* Statistics utilities */
2366void
250e061e 2367bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
8b230ed8 2368{
8b230ed8
RM
2369 int i, j;
2370
2371 for (i = 0; i < bnad->num_rx; i++) {
2372 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2373 if (bnad->rx_info[i].rx_ctrl[j].ccb) {
250e061e 2374 stats->rx_packets += bnad->rx_info[i].
8b230ed8 2375 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
250e061e 2376 stats->rx_bytes += bnad->rx_info[i].
8b230ed8
RM
2377 rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2378 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2379 bnad->rx_info[i].rx_ctrl[j].ccb->
2380 rcb[1]->rxq) {
250e061e 2381 stats->rx_packets +=
8b230ed8
RM
2382 bnad->rx_info[i].rx_ctrl[j].
2383 ccb->rcb[1]->rxq->rx_packets;
250e061e 2384 stats->rx_bytes +=
8b230ed8
RM
2385 bnad->rx_info[i].rx_ctrl[j].
2386 ccb->rcb[1]->rxq->rx_bytes;
2387 }
2388 }
2389 }
2390 }
2391 for (i = 0; i < bnad->num_tx; i++) {
2392 for (j = 0; j < bnad->num_txq_per_tx; j++) {
2393 if (bnad->tx_info[i].tcb[j]) {
250e061e 2394 stats->tx_packets +=
8b230ed8 2395 bnad->tx_info[i].tcb[j]->txq->tx_packets;
250e061e 2396 stats->tx_bytes +=
8b230ed8
RM
2397 bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2398 }
2399 }
2400 }
2401}
2402
2403/*
2404 * Must be called with the bna_lock held.
2405 */
2406void
250e061e 2407bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
8b230ed8 2408{
078086f3
RM
2409 struct bfi_enet_stats_mac *mac_stats;
2410 u32 bmap;
8b230ed8
RM
2411 int i;
2412
078086f3 2413 mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
250e061e 2414 stats->rx_errors =
8b230ed8
RM
2415 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2416 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2417 mac_stats->rx_undersize;
250e061e 2418 stats->tx_errors = mac_stats->tx_fcs_error +
8b230ed8 2419 mac_stats->tx_undersize;
250e061e
ED
2420 stats->rx_dropped = mac_stats->rx_drop;
2421 stats->tx_dropped = mac_stats->tx_drop;
2422 stats->multicast = mac_stats->rx_multicast;
2423 stats->collisions = mac_stats->tx_total_collision;
8b230ed8 2424
250e061e 2425 stats->rx_length_errors = mac_stats->rx_frame_length_error;
8b230ed8
RM
2426
2427 /* receive ring buffer overflow ?? */
2428
250e061e
ED
2429 stats->rx_crc_errors = mac_stats->rx_fcs_error;
2430 stats->rx_frame_errors = mac_stats->rx_alignment_error;
8b230ed8 2431 /* receiver FIFO overrun */
078086f3
RM
2432 bmap = bna_rx_rid_mask(&bnad->bna);
2433 for (i = 0; bmap; i++) {
8b230ed8 2434 if (bmap & 1) {
250e061e 2435 stats->rx_fifo_errors +=
8b230ed8 2436 bnad->stats.bna_stats->
078086f3 2437 hw_stats.rxf_stats[i].frame_drops;
8b230ed8
RM
2438 break;
2439 }
2440 bmap >>= 1;
2441 }
2442}
2443
2444static void
2445bnad_mbox_irq_sync(struct bnad *bnad)
2446{
2447 u32 irq;
2448 unsigned long flags;
2449
2450 spin_lock_irqsave(&bnad->bna_lock, flags);
2451 if (bnad->cfg_flags & BNAD_CF_MSIX)
8811e267 2452 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
8b230ed8
RM
2453 else
2454 irq = bnad->pcidev->irq;
2455 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2456
2457 synchronize_irq(irq);
2458}
2459
2460/* Utility used by bnad_start_xmit, for doing TSO */
2461static int
2462bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2463{
2464 int err;
2465
8b230ed8
RM
2466 if (skb_header_cloned(skb)) {
2467 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2468 if (err) {
2469 BNAD_UPDATE_CTR(bnad, tso_err);
2470 return err;
2471 }
2472 }
2473
2474 /*
2475 * For TSO, the TCP checksum field is seeded with pseudo-header sum
2476 * excluding the length field.
2477 */
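 /* The seed is ~csum_tcpudp_magic(saddr, daddr, 0, IPPROTO_TCP, 0) for
  * IPv4 (csum_ipv6_magic for IPv6): a pseudo-header sum with zero
  * length, so the hardware can fold in the per-segment length and
  * payload checksum.
  */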
2478 if (skb->protocol == htons(ETH_P_IP)) {
2479 struct iphdr *iph = ip_hdr(skb);
2480
2481 /* Do we really need these? */
2482 iph->tot_len = 0;
2483 iph->check = 0;
2484
2485 tcp_hdr(skb)->check =
2486 ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2487 IPPROTO_TCP, 0);
2488 BNAD_UPDATE_CTR(bnad, tso4);
2489 } else {
2490 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2491
8b230ed8
RM
2492 ipv6h->payload_len = 0;
2493 tcp_hdr(skb)->check =
2494 ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2495 IPPROTO_TCP, 0);
2496 BNAD_UPDATE_CTR(bnad, tso6);
2497 }
2498
2499 return 0;
2500}
2501
2502/*
2503 * Initialize Q numbers depending on Rx Paths
2504 * Called with bnad->bna_lock held, because of cfg_flags
2505 * access.
2506 */
2507static void
2508bnad_q_num_init(struct bnad *bnad)
2509{
2510 int rxps;
2511
2512 rxps = min((uint)num_online_cpus(),
772b5235 2513 (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
8b230ed8
RM
2514
2515 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2516 rxps = 1; /* INTx */
2517
2518 bnad->num_rx = 1;
2519 bnad->num_tx = 1;
2520 bnad->num_rxp_per_rx = rxps;
2521 bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2522}
2523
2524/*
2525 * Adjusts the Q numbers, given a number of msix vectors
 2526 * Give preference to RSS as opposed to Tx priority queues;
 2527 * in such a case, just use 1 Tx queue.
 2528 * Called with bnad->bna_lock held because of cfg_flags access
2529 */
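/* Example (assuming BNAD_MAILBOX_MSIX_VECTORS is 1): with 8 MSI-X
 * vectors and one TxQ, num_rxp_per_rx becomes 8 - 1 - 1 = 6.
 */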
2530static void
078086f3 2531bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
8b230ed8
RM
2532{
2533 bnad->num_txq_per_tx = 1;
2534 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
2535 bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2536 (bnad->cfg_flags & BNAD_CF_MSIX)) {
2537 bnad->num_rxp_per_rx = msix_vectors -
2538 (bnad->num_tx * bnad->num_txq_per_tx) -
2539 BNAD_MAILBOX_MSIX_VECTORS;
2540 } else
2541 bnad->num_rxp_per_rx = 1;
2542}
2543
078086f3
RM
2544/* Enable / disable ioceth */
2545static int
2546bnad_ioceth_disable(struct bnad *bnad)
8b230ed8
RM
2547{
2548 unsigned long flags;
078086f3 2549 int err = 0;
8b230ed8
RM
2550
2551 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3
RM
2552 init_completion(&bnad->bnad_completions.ioc_comp);
2553 bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
8b230ed8
RM
2554 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2555
078086f3
RM
2556 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2557 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2558
2559 err = bnad->bnad_completions.ioc_comp_status;
2560 return err;
8b230ed8
RM
2561}
2562
2563static int
078086f3 2564bnad_ioceth_enable(struct bnad *bnad)
8b230ed8
RM
2565{
2566 int err = 0;
2567 unsigned long flags;
2568
8b230ed8 2569 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3
RM
2570 init_completion(&bnad->bnad_completions.ioc_comp);
2571 bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2572 bna_ioceth_enable(&bnad->bna.ioceth);
8b230ed8
RM
2573 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2574
078086f3
RM
2575 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2576 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
8b230ed8 2577
078086f3 2578 err = bnad->bnad_completions.ioc_comp_status;
8b230ed8
RM
2579
2580 return err;
2581}
2582
2583/* Free BNA resources */
2584static void
078086f3
RM
2585bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2586 u32 res_val_max)
8b230ed8
RM
2587{
2588 int i;
8b230ed8 2589
078086f3
RM
2590 for (i = 0; i < res_val_max; i++)
2591 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
8b230ed8
RM
2592}
2593
2594/* Allocates memory and interrupt resources for BNA */
2595static int
078086f3
RM
2596bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2597 u32 res_val_max)
8b230ed8
RM
2598{
2599 int i, err;
8b230ed8 2600
078086f3
RM
2601 for (i = 0; i < res_val_max; i++) {
2602 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
8b230ed8
RM
2603 if (err)
2604 goto err_return;
2605 }
2606 return 0;
2607
2608err_return:
078086f3 2609 bnad_res_free(bnad, res_info, res_val_max);
8b230ed8
RM
2610 return err;
2611}
2612
2613/* Interrupt enable / disable */
2614static void
2615bnad_enable_msix(struct bnad *bnad)
2616{
2617 int i, ret;
8b230ed8
RM
2618 unsigned long flags;
2619
2620 spin_lock_irqsave(&bnad->bna_lock, flags);
2621 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2622 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2623 return;
2624 }
2625 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2626
2627 if (bnad->msix_table)
2628 return;
2629
8b230ed8 2630 bnad->msix_table =
b7ee31c5 2631 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
8b230ed8
RM
2632
2633 if (!bnad->msix_table)
2634 goto intx_mode;
2635
b7ee31c5 2636 for (i = 0; i < bnad->msix_num; i++)
8b230ed8
RM
2637 bnad->msix_table[i].entry = i;
2638
b7ee31c5 2639 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
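 /* Legacy pci_enable_msix() contract: 0 on success, a positive count
  * when fewer vectors are available than requested, or a negative
  * errno. On partial availability the queue counts are adjusted and a
  * second attempt is made; any other failure falls back to INTx mode.
  */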
8b230ed8
RM
2640 if (ret > 0) {
2641 /* Not enough MSI-X vectors. */
19dbff9f
RM
2642 pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
2643 ret, bnad->msix_num);
8b230ed8
RM
2644
2645 spin_lock_irqsave(&bnad->bna_lock, flags);
2646 /* ret = #of vectors that we got */
271e8b79
RM
2647 bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2648 (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
8b230ed8
RM
2649 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2650
271e8b79 2651 bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
8b230ed8 2652 BNAD_MAILBOX_MSIX_VECTORS;
8b230ed8 2653
078086f3
RM
2654 if (bnad->msix_num > ret)
2655 goto intx_mode;
2656
8b230ed8
RM
2657 /* Try once more with adjusted numbers */
2658 /* If this fails, fall back to INTx */
2659 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
b7ee31c5 2660 bnad->msix_num);
8b230ed8
RM
2661 if (ret)
2662 goto intx_mode;
2663
2664 } else if (ret < 0)
2665 goto intx_mode;
078086f3
RM
2666
2667 pci_intx(bnad->pcidev, 0);
2668
8b230ed8
RM
2669 return;
2670
2671intx_mode:
19dbff9f 2672 pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n");
8b230ed8
RM
2673
2674 kfree(bnad->msix_table);
2675 bnad->msix_table = NULL;
2676 bnad->msix_num = 0;
8b230ed8
RM
2677 spin_lock_irqsave(&bnad->bna_lock, flags);
2678 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2679 bnad_q_num_init(bnad);
2680 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2681}
2682
2683static void
2684bnad_disable_msix(struct bnad *bnad)
2685{
2686 u32 cfg_flags;
2687 unsigned long flags;
2688
2689 spin_lock_irqsave(&bnad->bna_lock, flags);
2690 cfg_flags = bnad->cfg_flags;
2691 if (bnad->cfg_flags & BNAD_CF_MSIX)
2692 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2693 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2694
2695 if (cfg_flags & BNAD_CF_MSIX) {
2696 pci_disable_msix(bnad->pcidev);
2697 kfree(bnad->msix_table);
2698 bnad->msix_table = NULL;
2699 }
2700}
2701
2702/* Netdev entry points */
2703static int
2704bnad_open(struct net_device *netdev)
2705{
2706 int err;
2707 struct bnad *bnad = netdev_priv(netdev);
2708 struct bna_pause_config pause_config;
8b230ed8
RM
2709 unsigned long flags;
2710
2711 mutex_lock(&bnad->conf_mutex);
2712
2713 /* Tx */
2714 err = bnad_setup_tx(bnad, 0);
2715 if (err)
2716 goto err_return;
2717
2718 /* Rx */
2719 err = bnad_setup_rx(bnad, 0);
2720 if (err)
2721 goto cleanup_tx;
2722
2723 /* Port */
2724 pause_config.tx_pause = 0;
2725 pause_config.rx_pause = 0;
2726
8b230ed8 2727 spin_lock_irqsave(&bnad->bna_lock, flags);
e29aa339
RM
2728 bna_enet_mtu_set(&bnad->bna.enet,
2729 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
078086f3
RM
2730 bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
2731 bna_enet_enable(&bnad->bna.enet);
8b230ed8
RM
2732 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2733
2734 /* Enable broadcast */
2735 bnad_enable_default_bcast(bnad);
2736
aad75b66
RM
2737 /* Restore VLANs, if any */
2738 bnad_restore_vlans(bnad, 0);
2739
8b230ed8
RM
2740 /* Set the UCAST address */
2741 spin_lock_irqsave(&bnad->bna_lock, flags);
2742 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2743 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2744
2745 /* Start the stats timer */
2746 bnad_stats_timer_start(bnad);
2747
2748 mutex_unlock(&bnad->conf_mutex);
2749
2750 return 0;
2751
2752cleanup_tx:
b3cc6e88 2753 bnad_destroy_tx(bnad, 0);
8b230ed8
RM
2754
2755err_return:
2756 mutex_unlock(&bnad->conf_mutex);
2757 return err;
2758}
2759
2760static int
2761bnad_stop(struct net_device *netdev)
2762{
2763 struct bnad *bnad = netdev_priv(netdev);
2764 unsigned long flags;
2765
2766 mutex_lock(&bnad->conf_mutex);
2767
2768 /* Stop the stats timer */
2769 bnad_stats_timer_stop(bnad);
2770
078086f3 2771 init_completion(&bnad->bnad_completions.enet_comp);
8b230ed8
RM
2772
2773 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3
RM
2774 bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2775 bnad_cb_enet_disabled);
8b230ed8
RM
2776 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2777
078086f3 2778 wait_for_completion(&bnad->bnad_completions.enet_comp);
8b230ed8 2779
b3cc6e88
JH
2780 bnad_destroy_tx(bnad, 0);
2781 bnad_destroy_rx(bnad, 0);
8b230ed8
RM
2782
2783 /* Synchronize mailbox IRQ */
2784 bnad_mbox_irq_sync(bnad);
2785
2786 mutex_unlock(&bnad->conf_mutex);
2787
2788 return 0;
2789}
2790
2791/* TX */
5216562a
RM
2792/* Returns 0 for success */
2793static int
2794bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
2795 struct sk_buff *skb, struct bna_txq_entry *txqent)
8b230ed8 2796{
5216562a
RM
2797 u16 flags = 0;
2798 u32 gso_size;
2799 u16 vlan_tag = 0;
8b230ed8 2800
eab6d18d 2801 if (vlan_tx_tag_present(skb)) {
5216562a 2802 vlan_tag = (u16)vlan_tx_tag_get(skb);
8b230ed8
RM
2803 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2804 }
2805 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
5216562a
RM
2806 vlan_tag = ((tcb->priority & 0x7) << VLAN_PRIO_SHIFT)
2807 | (vlan_tag & 0x1fff);
8b230ed8
RM
2808 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2809 }
8b230ed8
RM
2810 txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2811
2812 if (skb_is_gso(skb)) {
271e8b79 2813 gso_size = skb_shinfo(skb)->gso_size;
5216562a 2814 if (unlikely(gso_size > bnad->netdev->mtu)) {
271e8b79 2815 BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
5216562a 2816 return -EINVAL;
271e8b79
RM
2817 }
2818 if (unlikely((gso_size + skb_transport_offset(skb) +
5216562a 2819 tcp_hdrlen(skb)) >= skb->len)) {
271e8b79
RM
2820 txqent->hdr.wi.opcode =
2821 __constant_htons(BNA_TXQ_WI_SEND);
2822 txqent->hdr.wi.lso_mss = 0;
2823 BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2824 } else {
2825 txqent->hdr.wi.opcode =
2826 __constant_htons(BNA_TXQ_WI_SEND_LSO);
2827 txqent->hdr.wi.lso_mss = htons(gso_size);
2828 }
2829
5216562a 2830 if (bnad_tso_prepare(bnad, skb)) {
271e8b79 2831 BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
5216562a 2832 return -EINVAL;
8b230ed8 2833 }
5216562a 2834
8b230ed8
RM
2835 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2836 txqent->hdr.wi.l4_hdr_size_n_offset =
5216562a
RM
2837 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(
2838 tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));
2839 } else {
271e8b79 2840 txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND);
8b230ed8
RM
2841 txqent->hdr.wi.lso_mss = 0;
2842
5216562a 2843 if (unlikely(skb->len > (bnad->netdev->mtu + ETH_HLEN))) {
271e8b79 2844 BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
5216562a 2845 return -EINVAL;
8b230ed8 2846 }
8b230ed8 2847
271e8b79
RM
2848 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2849 u8 proto = 0;
8b230ed8 2850
271e8b79
RM
2851 if (skb->protocol == __constant_htons(ETH_P_IP))
2852 proto = ip_hdr(skb)->protocol;
5216562a 2853#ifdef NETIF_F_IPV6_CSUM
271e8b79
RM
2854 else if (skb->protocol ==
2855 __constant_htons(ETH_P_IPV6)) {
2856 /* nexthdr may not be TCP immediately. */
2857 proto = ipv6_hdr(skb)->nexthdr;
2858 }
5216562a 2859#endif
271e8b79
RM
2860 if (proto == IPPROTO_TCP) {
2861 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2862 txqent->hdr.wi.l4_hdr_size_n_offset =
2863 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2864 (0, skb_transport_offset(skb)));
2865
2866 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2867
2868 if (unlikely(skb_headlen(skb) <
5216562a
RM
2869 skb_transport_offset(skb) +
2870 tcp_hdrlen(skb))) {
271e8b79 2871 BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
5216562a 2872 return -EINVAL;
271e8b79 2873 }
271e8b79
RM
2874 } else if (proto == IPPROTO_UDP) {
2875 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2876 txqent->hdr.wi.l4_hdr_size_n_offset =
2877 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2878 (0, skb_transport_offset(skb)));
2879
2880 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2881 if (unlikely(skb_headlen(skb) <
5216562a 2882 skb_transport_offset(skb) +
271e8b79 2883 sizeof(struct udphdr))) {
271e8b79 2884 BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
5216562a 2885 return -EINVAL;
271e8b79
RM
2886 }
2887 } else {
5216562a 2888
271e8b79 2889 BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
5216562a 2890 return -EINVAL;
8b230ed8 2891 }
5216562a 2892 } else
271e8b79 2893 txqent->hdr.wi.l4_hdr_size_n_offset = 0;
8b230ed8
RM
2894 }
2895
2896 txqent->hdr.wi.flags = htons(flags);
8b230ed8
RM
2897 txqent->hdr.wi.frame_length = htonl(skb->len);
2898
5216562a
RM
2899 return 0;
2900}
2901
2902/*
2903 * bnad_start_xmit : Netdev entry point for Transmit
2904 * Called under lock held by net_device
2905 */
2906static netdev_tx_t
2907bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2908{
2909 struct bnad *bnad = netdev_priv(netdev);
2910 u32 txq_id = 0;
2911 struct bna_tcb *tcb = NULL;
2912 struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap;
2913 u32 prod, q_depth, vect_id;
2914 u32 wis, vectors, len;
2915 int i;
2916 dma_addr_t dma_addr;
2917 struct bna_txq_entry *txqent;
2918
271e8b79 2919 len = skb_headlen(skb);
8b230ed8 2920
5216562a
RM
2921 /* Sanity checks for the skb */
2922
2923 if (unlikely(skb->len <= ETH_HLEN)) {
2924 dev_kfree_skb(skb);
2925 BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2926 return NETDEV_TX_OK;
2927 }
2928 if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
2929 dev_kfree_skb(skb);
2930 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2931 return NETDEV_TX_OK;
2932 }
2933 if (unlikely(len == 0)) {
2934 dev_kfree_skb(skb);
2935 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2936 return NETDEV_TX_OK;
2937 }
2938
2939 tcb = bnad->tx_info[0].tcb[txq_id];
2940 q_depth = tcb->q_depth;
2941 prod = tcb->producer_index;
8b230ed8 2942
5216562a 2943 unmap_q = tcb->unmap_q;
271e8b79 2944
5216562a
RM
2945 /*
2946 * Takes care of the Tx that is scheduled between clearing the flag
2947 * and the netif_tx_stop_all_queues() call.
2948 */
2949 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2950 dev_kfree_skb(skb);
2951 BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
2952 return NETDEV_TX_OK;
2953 }
2954
2955 vectors = 1 + skb_shinfo(skb)->nr_frags;
2956 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
2957
2958 if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
2959 dev_kfree_skb(skb);
2960 BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
2961 return NETDEV_TX_OK;
2962 }
2963
2964 /* Check for available TxQ resources */
2965 if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
2966 if ((*tcb->hw_consumer_index != tcb->consumer_index) &&
2967 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2968 u32 sent;
2969 sent = bnad_txcmpl_process(bnad, tcb);
2970 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2971 bna_ib_ack(tcb->i_dbell, sent);
2972 smp_mb__before_clear_bit();
2973 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2974 } else {
2975 netif_stop_queue(netdev);
2976 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2977 }
2978
2979 smp_mb();
2980 /*
2981 * Check again to deal with race condition between
2982 * netif_stop_queue here, and netif_wake_queue in
2983 * interrupt handler which is not inside netif tx lock.
2984 */
2985 if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
2986 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2987 return NETDEV_TX_BUSY;
2988 } else {
2989 netif_wake_queue(netdev);
2990 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2991 }
2992 }
2993
2994 txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
2995 head_unmap = &unmap_q[prod];
2996
2997 /* Program the opcode, flags, frame_len, num_vectors in WI */
2998 if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
2999 dev_kfree_skb(skb);
3000 return NETDEV_TX_OK;
3001 }
3002 txqent->hdr.wi.reserved = 0;
3003 txqent->hdr.wi.num_vectors = vectors;
3004
3005 head_unmap->skb = skb;
3006 head_unmap->nvecs = 0;
3007
3008 /* Program the vectors */
3009 unmap = head_unmap;
3010 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
3011 len, DMA_TO_DEVICE);
3012 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
3013 txqent->vector[0].length = htons(len);
3014 dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr);
3015 head_unmap->nvecs++;
3016
3017 for (i = 0, vect_id = 0; i < vectors - 1; i++) {
9e903e08
ED
3018 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
3019 u16 size = skb_frag_size(frag);
8b230ed8 3020
271e8b79 3021 if (unlikely(size == 0)) {
5216562a
RM
3022 /* Undo the changes starting at tcb->producer_index */
3023 bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3024 tcb->producer_index);
271e8b79
RM
3025 dev_kfree_skb(skb);
3026 BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
3027 return NETDEV_TX_OK;
3028 }
3029
3030 len += size;
3031
5216562a
RM
3032 vect_id++;
3033 if (vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
8b230ed8 3034 vect_id = 0;
5216562a
RM
3035 BNA_QE_INDX_INC(prod, q_depth);
3036 txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
271e8b79
RM
3037 txqent->hdr.wi_ext.opcode =
3038 __constant_htons(BNA_TXQ_WI_EXTENSION);
5216562a 3039 unmap = &unmap_q[prod];
8b230ed8
RM
3040 }
3041
4d5b1a67
IC
3042 dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
3043 0, size, DMA_TO_DEVICE);
8b230ed8 3044 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
5216562a
RM
3045 txqent->vector[vect_id].length = htons(size);
3046 dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr,
3047 dma_addr);
3048 head_unmap->nvecs++;
8b230ed8
RM
3049 }
3050
271e8b79 3051 if (unlikely(len != skb->len)) {
5216562a
RM
3052 /* Undo the changes starting at tcb->producer_index */
3053 bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
271e8b79
RM
3054 dev_kfree_skb(skb);
3055 BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
3056 return NETDEV_TX_OK;
3057 }
3058
5216562a
RM
3059 BNA_QE_INDX_INC(prod, q_depth);
3060 tcb->producer_index = prod;
8b230ed8
RM
3061
3062 smp_mb();
be7fa326
RM
3063
3064 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
3065 return NETDEV_TX_OK;
3066
fee1253e
RM
3067 skb_tx_timestamp(skb);
3068
8b230ed8 3069 bna_txq_prod_indx_doorbell(tcb);
271e8b79 3070 smp_mb();
8b230ed8 3071
8b230ed8
RM
3072 return NETDEV_TX_OK;
3073}
3074
3075/*
 3076 * Uses spin_lock to synchronize reading of stats structures, which
 3077 * are written by BNA under the same lock.
3078 */
250e061e
ED
3079static struct rtnl_link_stats64 *
3080bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
8b230ed8
RM
3081{
3082 struct bnad *bnad = netdev_priv(netdev);
3083 unsigned long flags;
3084
3085 spin_lock_irqsave(&bnad->bna_lock, flags);
3086
250e061e
ED
3087 bnad_netdev_qstats_fill(bnad, stats);
3088 bnad_netdev_hwstats_fill(bnad, stats);
8b230ed8
RM
3089
3090 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3091
250e061e 3092 return stats;
8b230ed8
RM
3093}
3094
fe1624cf
RM
3095static void
3096bnad_set_rx_ucast_fltr(struct bnad *bnad)
3097{
3098 struct net_device *netdev = bnad->netdev;
3099 int uc_count = netdev_uc_count(netdev);
3100 enum bna_cb_status ret;
3101 u8 *mac_list;
3102 struct netdev_hw_addr *ha;
3103 int entry;
3104
3105 if (netdev_uc_empty(bnad->netdev)) {
3106 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
3107 return;
3108 }
3109
3110 if (uc_count > bna_attr(&bnad->bna)->num_ucmac)
3111 goto mode_default;
3112
3113 mac_list = kzalloc(uc_count * ETH_ALEN, GFP_ATOMIC);
3114 if (mac_list == NULL)
3115 goto mode_default;
3116
3117 entry = 0;
3118 netdev_for_each_uc_addr(ha, netdev) {
3119 memcpy(&mac_list[entry * ETH_ALEN],
3120 &ha->addr[0], ETH_ALEN);
3121 entry++;
3122 }
3123
3124 ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry,
3125 mac_list, NULL);
3126 kfree(mac_list);
3127
3128 if (ret != BNA_CB_SUCCESS)
3129 goto mode_default;
3130
3131 return;
3132
3133 /* ucast packets not in UCAM are routed to default function */
3134mode_default:
3135 bnad->cfg_flags |= BNAD_CF_DEFAULT;
3136 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
3137}
3138
3139static void
3140bnad_set_rx_mcast_fltr(struct bnad *bnad)
3141{
3142 struct net_device *netdev = bnad->netdev;
3143 int mc_count = netdev_mc_count(netdev);
3144 enum bna_cb_status ret;
3145 u8 *mac_list;
3146
3147 if (netdev->flags & IFF_ALLMULTI)
3148 goto mode_allmulti;
3149
3150 if (netdev_mc_empty(netdev))
3151 return;
3152
3153 if (mc_count > bna_attr(&bnad->bna)->num_mcmac)
3154 goto mode_allmulti;
3155
3156 mac_list = kzalloc((mc_count + 1) * ETH_ALEN, GFP_ATOMIC);
3157
3158 if (mac_list == NULL)
3159 goto mode_allmulti;
3160
3161 memcpy(&mac_list[0], &bnad_bcast_addr[0], ETH_ALEN);
3162
3163 /* copy rest of the MCAST addresses */
3164 bnad_netdev_mc_list_get(netdev, mac_list);
3165 ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
3166 mac_list, NULL);
3167 kfree(mac_list);
3168
3169 if (ret != BNA_CB_SUCCESS)
3170 goto mode_allmulti;
3171
3172 return;
3173
3174mode_allmulti:
3175 bnad->cfg_flags |= BNAD_CF_ALLMULTI;
3176 bna_rx_mcast_delall(bnad->rx_info[0].rx, NULL);
3177}
3178
a2122d95 3179void
8b230ed8
RM
3180bnad_set_rx_mode(struct net_device *netdev)
3181{
3182 struct bnad *bnad = netdev_priv(netdev);
fe1624cf 3183 enum bna_rxmode new_mode, mode_mask;
8b230ed8
RM
3184 unsigned long flags;
3185
3186 spin_lock_irqsave(&bnad->bna_lock, flags);
3187
fe1624cf
RM
3188 if (bnad->rx_info[0].rx == NULL) {
3189 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3190 return;
8b230ed8
RM
3191 }
3192
fe1624cf
RM
3193 /* clear bnad flags to update it with new settings */
3194 bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT |
3195 BNAD_CF_ALLMULTI);
271e8b79 3196
fe1624cf
RM
3197 new_mode = 0;
3198 if (netdev->flags & IFF_PROMISC) {
3199 new_mode |= BNAD_RXMODE_PROMISC_DEFAULT;
3200 bnad->cfg_flags |= BNAD_CF_PROMISC;
3201 } else {
3202 bnad_set_rx_mcast_fltr(bnad);
8b230ed8 3203
fe1624cf
RM
3204 if (bnad->cfg_flags & BNAD_CF_ALLMULTI)
3205 new_mode |= BNA_RXMODE_ALLMULTI;
8b230ed8 3206
fe1624cf 3207 bnad_set_rx_ucast_fltr(bnad);
8b230ed8 3208
fe1624cf
RM
3209 if (bnad->cfg_flags & BNAD_CF_DEFAULT)
3210 new_mode |= BNA_RXMODE_DEFAULT;
3211 }
8b230ed8 3212
fe1624cf
RM
3213 mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
3214 BNA_RXMODE_ALLMULTI;
3215 bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask, NULL);
8b230ed8 3216
fe1624cf
RM
3217 if (bnad->cfg_flags & BNAD_CF_PROMISC)
3218 bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
3219 else
3220 bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
8b230ed8 3221
8b230ed8
RM
3222 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3223}
3224
3225/*
3226 * bna_lock is used to sync writes to netdev->addr
3227 * conf_lock cannot be used since this call may be made
3228 * in a non-blocking context.
3229 */
3230static int
3231bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
3232{
3233 int err;
3234 struct bnad *bnad = netdev_priv(netdev);
3235 struct sockaddr *sa = (struct sockaddr *)mac_addr;
3236 unsigned long flags;
3237
3238 spin_lock_irqsave(&bnad->bna_lock, flags);
3239
3240 err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
3241
3242 if (!err)
3243 memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
3244
3245 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3246
3247 return err;
3248}
3249
3250static int
e29aa339 3251bnad_mtu_set(struct bnad *bnad, int frame_size)
8b230ed8 3252{
8b230ed8
RM
3253 unsigned long flags;
3254
078086f3
RM
3255 init_completion(&bnad->bnad_completions.mtu_comp);
3256
3257 spin_lock_irqsave(&bnad->bna_lock, flags);
e29aa339 3258 bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set);
078086f3
RM
3259 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3260
3261 wait_for_completion(&bnad->bnad_completions.mtu_comp);
3262
3263 return bnad->bnad_completions.mtu_comp_status;
3264}
3265
3266static int
3267bnad_change_mtu(struct net_device *netdev, int new_mtu)
3268{
e29aa339 3269 int err, mtu;
8b230ed8 3270 struct bnad *bnad = netdev_priv(netdev);
e29aa339 3271 u32 rx_count = 0, frame, new_frame;
8b230ed8
RM
3272
3273 if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
3274 return -EINVAL;
3275
3276 mutex_lock(&bnad->conf_mutex);
3277
e29aa339 3278 mtu = netdev->mtu;
8b230ed8
RM
3279 netdev->mtu = new_mtu;
3280
e29aa339
RM
3281 frame = BNAD_FRAME_SIZE(mtu);
3282 new_frame = BNAD_FRAME_SIZE(new_mtu);
3283
3284 /* check if multi-buffer needs to be enabled */
3285 if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
3286 netif_running(bnad->netdev)) {
3287 /* only when transition is over 4K */
3288 if ((frame <= 4096 && new_frame > 4096) ||
3289 (frame > 4096 && new_frame <= 4096))
3290 rx_count = bnad_reinit_rx(bnad);
3291 }
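 /* The Rx buffer layout (single vs. multi-buffer) is fixed when the Rx
  * objects are created (see bnad_init_rx_config()), so crossing the 4K
  * frame-size boundary on CAT2 hardware requires a full Rx re-create
  * via bnad_reinit_rx().
  */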
3292
 3293 /* rx_count > 0 - new Rx objects were created
 3294 * - Linux sets err = 0 and returns
 3295 */
3296 err = bnad_mtu_set(bnad, new_frame);
078086f3
RM
3297 if (err)
3298 err = -EBUSY;
8b230ed8
RM
3299
3300 mutex_unlock(&bnad->conf_mutex);
3301 return err;
3302}
3303
8e586137 3304static int
80d5c368 3305bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
8b230ed8
RM
3306{
3307 struct bnad *bnad = netdev_priv(netdev);
3308 unsigned long flags;
3309
3310 if (!bnad->rx_info[0].rx)
8e586137 3311 return 0;
8b230ed8
RM
3312
3313 mutex_lock(&bnad->conf_mutex);
3314
3315 spin_lock_irqsave(&bnad->bna_lock, flags);
3316 bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
f859d7cb 3317 set_bit(vid, bnad->active_vlans);
8b230ed8
RM
3318 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3319
3320 mutex_unlock(&bnad->conf_mutex);
8e586137
JP
3321
3322 return 0;
8b230ed8
RM
3323}
3324
8e586137 3325static int
80d5c368 3326bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
8b230ed8
RM
3327{
3328 struct bnad *bnad = netdev_priv(netdev);
3329 unsigned long flags;
3330
3331 if (!bnad->rx_info[0].rx)
8e586137 3332 return 0;
8b230ed8
RM
3333
3334 mutex_lock(&bnad->conf_mutex);
3335
3336 spin_lock_irqsave(&bnad->bna_lock, flags);
f859d7cb 3337 clear_bit(vid, bnad->active_vlans);
8b230ed8
RM
3338 bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
3339 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3340
3341 mutex_unlock(&bnad->conf_mutex);
8e586137
JP
3342
3343 return 0;
8b230ed8
RM
3344}
3345
3346#ifdef CONFIG_NET_POLL_CONTROLLER
3347static void
3348bnad_netpoll(struct net_device *netdev)
3349{
3350 struct bnad *bnad = netdev_priv(netdev);
3351 struct bnad_rx_info *rx_info;
3352 struct bnad_rx_ctrl *rx_ctrl;
3353 u32 curr_mask;
3354 int i, j;
3355
3356 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
3357 bna_intx_disable(&bnad->bna, curr_mask);
3358 bnad_isr(bnad->pcidev->irq, netdev);
3359 bna_intx_enable(&bnad->bna, curr_mask);
3360 } else {
19dbff9f
RM
3361 /*
3362 * Tx processing may happen in sending context, so no need
3363 * to explicitly process completions here
3364 */
3365
3366 /* Rx processing */
8b230ed8
RM
3367 for (i = 0; i < bnad->num_rx; i++) {
3368 rx_info = &bnad->rx_info[i];
3369 if (!rx_info->rx)
3370 continue;
3371 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
3372 rx_ctrl = &rx_info->rx_ctrl[j];
271e8b79 3373 if (rx_ctrl->ccb)
8b230ed8
RM
3374 bnad_netif_rx_schedule_poll(bnad,
3375 rx_ctrl->ccb);
8b230ed8
RM
3376 }
3377 }
3378 }
3379}
3380#endif
3381
3382static const struct net_device_ops bnad_netdev_ops = {
3383 .ndo_open = bnad_open,
3384 .ndo_stop = bnad_stop,
3385 .ndo_start_xmit = bnad_start_xmit,
250e061e 3386 .ndo_get_stats64 = bnad_get_stats64,
8b230ed8 3387 .ndo_set_rx_mode = bnad_set_rx_mode,
8b230ed8
RM
3388 .ndo_validate_addr = eth_validate_addr,
3389 .ndo_set_mac_address = bnad_set_mac_address,
3390 .ndo_change_mtu = bnad_change_mtu,
8b230ed8
RM
3391 .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
3392 .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
3393#ifdef CONFIG_NET_POLL_CONTROLLER
3394 .ndo_poll_controller = bnad_netpoll
3395#endif
3396};
3397
3398static void
3399bnad_netdev_init(struct bnad *bnad, bool using_dac)
3400{
3401 struct net_device *netdev = bnad->netdev;
3402
e5ee20e7
MM
3403 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3404 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
f646968f 3405 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX;
8b230ed8 3406
e5ee20e7
MM
3407 netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
3408 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3409 NETIF_F_TSO | NETIF_F_TSO6;
8b230ed8 3410
e5ee20e7 3411 netdev->features |= netdev->hw_features |
f646968f 3412 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
8b230ed8
RM
3413
3414 if (using_dac)
3415 netdev->features |= NETIF_F_HIGHDMA;
3416
8b230ed8
RM
3417 netdev->mem_start = bnad->mmio_start;
3418 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
3419
3420 netdev->netdev_ops = &bnad_netdev_ops;
3421 bnad_set_ethtool_ops(netdev);
3422}
3423
3424/*
3425 * 1. Initialize the bnad structure
3426 * 2. Setup netdev pointer in pci_dev
d95d1081
JH
3427 * 3. Initialize no. of TxQ & CQs & MSIX vectors
3428 * 4. Initialize work queue.
8b230ed8
RM
3429 */
3430static int
3431bnad_init(struct bnad *bnad,
3432 struct pci_dev *pdev, struct net_device *netdev)
3433{
3434 unsigned long flags;
3435
3436 SET_NETDEV_DEV(netdev, &pdev->dev);
3437 pci_set_drvdata(pdev, netdev);
3438
3439 bnad->netdev = netdev;
3440 bnad->pcidev = pdev;
3441 bnad->mmio_start = pci_resource_start(pdev, 0);
3442 bnad->mmio_len = pci_resource_len(pdev, 0);
3443 bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
3444 if (!bnad->bar0) {
3445 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
8b230ed8
RM
3446 return -ENOMEM;
3447 }
3448 pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
3449 (unsigned long long) bnad->mmio_len);
3450
3451 spin_lock_irqsave(&bnad->bna_lock, flags);
3452 if (!bnad_msix_disable)
3453 bnad->cfg_flags = BNAD_CF_MSIX;
3454
3455 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
3456
3457 bnad_q_num_init(bnad);
3458 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3459
3460 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
3461 (bnad->num_rx * bnad->num_rxp_per_rx) +
3462 BNAD_MAILBOX_MSIX_VECTORS;
8b230ed8
RM
3463
3464 bnad->txq_depth = BNAD_TXQ_DEPTH;
3465 bnad->rxq_depth = BNAD_RXQ_DEPTH;
8b230ed8
RM
3466
3467 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3468 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3469
01b54b14
JH
3470 sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
3471 bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
ba21fc69
WY
3472 if (!bnad->work_q) {
3473 iounmap(bnad->bar0);
01b54b14 3474 return -ENOMEM;
ba21fc69 3475 }
01b54b14 3476
8b230ed8
RM
3477 return 0;
3478}
3479
3480/*
3481 * Must be called after bnad_pci_uninit()
3482 * so that iounmap() and pci_set_drvdata(NULL)
3483 * happens only after PCI uninitialization.
3484 */
3485static void
3486bnad_uninit(struct bnad *bnad)
3487{
01b54b14
JH
3488 if (bnad->work_q) {
3489 flush_workqueue(bnad->work_q);
3490 destroy_workqueue(bnad->work_q);
3491 bnad->work_q = NULL;
3492 }
3493
8b230ed8
RM
3494 if (bnad->bar0)
3495 iounmap(bnad->bar0);
8b230ed8
RM
3496}
3497
3498/*
3499 * Initialize locks
078086f3 3500 a) Per-ioceth mutex used for serializing configuration
8b230ed8
RM
3501 changes from OS interface
3502 b) spin lock used to protect bna state machine
3503 */
3504static void
3505bnad_lock_init(struct bnad *bnad)
3506{
3507 spin_lock_init(&bnad->bna_lock);
3508 mutex_init(&bnad->conf_mutex);
72a9730b 3509 mutex_init(&bnad_list_mutex);
8b230ed8
RM
3510}
3511
3512static void
3513bnad_lock_uninit(struct bnad *bnad)
3514{
3515 mutex_destroy(&bnad->conf_mutex);
72a9730b 3516 mutex_destroy(&bnad_list_mutex);
8b230ed8
RM
3517}
3518
3519/* PCI Initialization */
3520static int
3521bnad_pci_init(struct bnad *bnad,
3522 struct pci_dev *pdev, bool *using_dac)
3523{
3524 int err;
3525
3526 err = pci_enable_device(pdev);
3527 if (err)
3528 return err;
3529 err = pci_request_regions(pdev, BNAD_NAME);
3530 if (err)
3531 goto disable_device;
3e548079 3532 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
3db1cd5c 3533 *using_dac = true;
8b230ed8 3534 } else {
3e548079
RK
3535 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3536 if (err)
3537 goto release_regions;
3db1cd5c 3538 *using_dac = false;
8b230ed8
RM
3539 }
3540 pci_set_master(pdev);
3541 return 0;
3542
3543release_regions:
3544 pci_release_regions(pdev);
3545disable_device:
3546 pci_disable_device(pdev);
3547
3548 return err;
3549}
3550
3551static void
3552bnad_pci_uninit(struct pci_dev *pdev)
3553{
3554 pci_release_regions(pdev);
3555 pci_disable_device(pdev);
3556}
3557
c4eef189 3558static int
8b230ed8
RM
3559bnad_pci_probe(struct pci_dev *pdev,
3560 const struct pci_device_id *pcidev_id)
3561{
3caa1e95 3562 bool using_dac;
0120b99c 3563 int err;
8b230ed8
RM
3564 struct bnad *bnad;
3565 struct bna *bna;
3566 struct net_device *netdev;
3567 struct bfa_pcidev pcidev_info;
3568 unsigned long flags;
3569
3570 pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
3571 pdev, pcidev_id, PCI_FUNC(pdev->devfn));
3572
3573 mutex_lock(&bnad_fwimg_mutex);
3574 if (!cna_get_firmware_buf(pdev)) {
3575 mutex_unlock(&bnad_fwimg_mutex);
3576 pr_warn("Failed to load Firmware Image!\n");
3577 return -ENODEV;
3578 }
3579 mutex_unlock(&bnad_fwimg_mutex);
3580
3581 /*
 3582 * Allocates sizeof(struct net_device) + sizeof(struct bnad);
 3583 * bnad = netdev_priv(netdev)
3584 */
3585 netdev = alloc_etherdev(sizeof(struct bnad));
3586 if (!netdev) {
8b230ed8
RM
3587 err = -ENOMEM;
3588 return err;
3589 }
3590 bnad = netdev_priv(netdev);
078086f3 3591 bnad_lock_init(bnad);
72a9730b 3592 bnad_add_to_list(bnad);
078086f3
RM
3593
3594 mutex_lock(&bnad->conf_mutex);
8b230ed8
RM
3595 /*
3596 * PCI initialization
0120b99c 3597 * Output : using_dac = 1 for 64 bit DMA
be7fa326 3598 * = 0 for 32 bit DMA
8b230ed8 3599 */
e905ed57 3600 using_dac = false;
8b230ed8
RM
3601 err = bnad_pci_init(bnad, pdev, &using_dac);
3602 if (err)
44861f44 3603 goto unlock_mutex;
8b230ed8 3604
8b230ed8
RM
3605 /*
3606 * Initialize bnad structure
3607 * Setup relation between pci_dev & netdev
8b230ed8
RM
3608 */
3609 err = bnad_init(bnad, pdev, netdev);
3610 if (err)
3611 goto pci_uninit;
078086f3 3612
8b230ed8
RM
3613 /* Initialize netdev structure, set up ethtool ops */
3614 bnad_netdev_init(bnad, using_dac);
3615
815f41e7
RM
3616 /* Set link to down state */
3617 netif_carrier_off(netdev);
3618
7afc5dbd
KG
3619 /* Setup the debugfs node for this bfad */
3620 if (bna_debugfs_enable)
3621 bnad_debugfs_init(bnad);
3622
8b230ed8 3623 /* Get resource requirement form bna */
078086f3 3624 spin_lock_irqsave(&bnad->bna_lock, flags);
8b230ed8 3625 bna_res_req(&bnad->res_info[0]);
078086f3 3626 spin_unlock_irqrestore(&bnad->bna_lock, flags);
8b230ed8
RM
3627
3628 /* Allocate resources from bna */
078086f3 3629 err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
8b230ed8 3630 if (err)
078086f3 3631 goto drv_uninit;
8b230ed8
RM
3632
3633 bna = &bnad->bna;
3634
3635 /* Setup pcidev_info for bna_init() */
3636 pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3637 pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3638 pcidev_info.device_id = bnad->pcidev->device;
3639 pcidev_info.pci_bar_kva = bnad->bar0;
3640
8b230ed8
RM
3641 spin_lock_irqsave(&bnad->bna_lock, flags);
3642 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
8b230ed8
RM
3643 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3644
3645 bnad->stats.bna_stats = &bna->stats;
3646
078086f3
RM
3647 bnad_enable_msix(bnad);
3648 err = bnad_mbox_irq_alloc(bnad);
3649 if (err)
3650 goto res_free;
3651
8b230ed8 3652 /* Set up timers */
078086f3 3653 setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
8b230ed8 3654 ((unsigned long)bnad));
078086f3 3655 setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
8b230ed8 3656 ((unsigned long)bnad));
078086f3 3657 setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
1d32f769 3658 ((unsigned long)bnad));
078086f3 3659 setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
8b230ed8
RM
3660 ((unsigned long)bnad));
3661
3662 /* Now start the timer before calling IOC */
078086f3 3663 mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
8b230ed8
RM
3664 jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
3665
3666 /*
3667 * Start the chip
078086f3
RM
 3668 * If the callback comes back with an error, we bail out.
3669 * This is a catastrophic error.
8b230ed8 3670 */
078086f3
RM
3671 err = bnad_ioceth_enable(bnad);
3672 if (err) {
3673 pr_err("BNA: Initialization failed err=%d\n",
3674 err);
3675 goto probe_success;
3676 }
3677
3678 spin_lock_irqsave(&bnad->bna_lock, flags);
3679 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3680 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
3681 bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
3682 bna_attr(bna)->num_rxp - 1);
3683 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3684 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
3685 err = -EIO;
3686 }
3caa1e95
RM
3687 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3688 if (err)
3689 goto disable_ioceth;
3690
3691 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3
RM
3692 bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
3693 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3694
3695 err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
0caa9aae
RM
3696 if (err) {
3697 err = -EIO;
078086f3 3698 goto disable_ioceth;
0caa9aae 3699 }
078086f3
RM
3700
3701 spin_lock_irqsave(&bnad->bna_lock, flags);
3702 bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
3703 spin_unlock_irqrestore(&bnad->bna_lock, flags);
8b230ed8
RM
3704
3705 /* Get the burnt-in mac */
3706 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3 3707 bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr);
8b230ed8
RM
3708 bnad_set_netdev_perm_addr(bnad);
3709 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3710
0caa9aae
RM
3711 mutex_unlock(&bnad->conf_mutex);
3712
8b230ed8
RM
 3713 /* Finally, register with the net_device layer */
3714 err = register_netdev(netdev);
3715 if (err) {
3716 pr_err("BNA : Registering with netdev failed\n");
078086f3 3717 goto probe_uninit;
8b230ed8 3718 }
078086f3 3719 set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
8b230ed8 3720
0caa9aae
RM
3721 return 0;
3722
078086f3
RM
3723probe_success:
3724 mutex_unlock(&bnad->conf_mutex);
8b230ed8
RM
3725 return 0;
3726
078086f3 3727probe_uninit:
3fc72370 3728 mutex_lock(&bnad->conf_mutex);
078086f3
RM
3729 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3730disable_ioceth:
3731 bnad_ioceth_disable(bnad);
3732 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3733 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3734 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
8b230ed8
RM
3735 spin_lock_irqsave(&bnad->bna_lock, flags);
3736 bna_uninit(bna);
3737 spin_unlock_irqrestore(&bnad->bna_lock, flags);
078086f3 3738 bnad_mbox_irq_free(bnad);
8b230ed8 3739 bnad_disable_msix(bnad);
078086f3
RM
3740res_free:
3741 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3742drv_uninit:
7afc5dbd
KG
3743 /* Remove the debugfs node for this bnad */
3744 kfree(bnad->regdata);
3745 bnad_debugfs_uninit(bnad);
078086f3 3746 bnad_uninit(bnad);
8b230ed8
RM
3747pci_uninit:
3748 bnad_pci_uninit(pdev);
44861f44 3749unlock_mutex:
078086f3 3750 mutex_unlock(&bnad->conf_mutex);
72a9730b 3751 bnad_remove_from_list(bnad);
8b230ed8 3752 bnad_lock_uninit(bnad);
8b230ed8
RM
3753 free_netdev(netdev);
3754 return err;
3755}
3756
c4eef189 3757static void
8b230ed8
RM
3758bnad_pci_remove(struct pci_dev *pdev)
3759{
3760 struct net_device *netdev = pci_get_drvdata(pdev);
3761 struct bnad *bnad;
3762 struct bna *bna;
3763 unsigned long flags;
3764
3765 if (!netdev)
3766 return;
3767
3768 pr_info("%s bnad_pci_remove\n", netdev->name);
3769 bnad = netdev_priv(netdev);
3770 bna = &bnad->bna;
3771
078086f3
RM
3772 if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
3773 unregister_netdev(netdev);
8b230ed8
RM
3774
3775 mutex_lock(&bnad->conf_mutex);
078086f3
RM
3776 bnad_ioceth_disable(bnad);
3777 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3778 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3779 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
8b230ed8
RM
3780 spin_lock_irqsave(&bnad->bna_lock, flags);
3781 bna_uninit(bna);
3782 spin_unlock_irqrestore(&bnad->bna_lock, flags);
8b230ed8 3783
078086f3
RM
3784 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3785 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3786 bnad_mbox_irq_free(bnad);
8b230ed8
RM
3787 bnad_disable_msix(bnad);
3788 bnad_pci_uninit(pdev);
078086f3 3789 mutex_unlock(&bnad->conf_mutex);
72a9730b 3790 bnad_remove_from_list(bnad);
8b230ed8 3791 bnad_lock_uninit(bnad);
7afc5dbd
KG
3792 /* Remove the debugfs node for this bnad */
3793 kfree(bnad->regdata);
3794 bnad_debugfs_uninit(bnad);
8b230ed8
RM
3795 bnad_uninit(bnad);
3796 free_netdev(netdev);
3797}
3798
0120b99c 3799static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = {
8b230ed8
RM
3800 {
3801 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3802 PCI_DEVICE_ID_BROCADE_CT),
3803 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3804 .class_mask = 0xffff00
586b2816
RM
3805 },
3806 {
3807 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3808 BFA_PCI_DEVICE_ID_CT2),
3809 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3810 .class_mask = 0xffff00
3811 },
3812 {0, },
8b230ed8
RM
3813};
3814
3815MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3816
3817static struct pci_driver bnad_pci_driver = {
3818 .name = BNAD_NAME,
3819 .id_table = bnad_pci_id_table,
3820 .probe = bnad_pci_probe,
c4eef189 3821 .remove = bnad_pci_remove,
8b230ed8
RM
3822};
3823
3824static int __init
3825bnad_module_init(void)
3826{
3827 int err;
3828
5aad0011
RM
3829 pr_info("Brocade 10G Ethernet driver - version: %s\n",
3830 BNAD_VERSION);
8b230ed8 3831
8a891429 3832 bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
8b230ed8
RM
3833
3834 err = pci_register_driver(&bnad_pci_driver);
3835 if (err < 0) {
3836 pr_err("bna : PCI registration failed in module init "
3837 "(%d)\n", err);
3838 return err;
3839 }
3840
3841 return 0;
3842}
3843
3844static void __exit
3845bnad_module_exit(void)
3846{
3847 pci_unregister_driver(&bnad_pci_driver);
294ca868 3848 release_firmware(bfi_fw);
8b230ed8
RM
3849}
3850
3851module_init(bnad_module_init);
3852module_exit(bnad_module_exit);
3853
3854MODULE_AUTHOR("Brocade");
3855MODULE_LICENSE("GPL");
3856MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
3857MODULE_VERSION(BNAD_VERSION);
3858MODULE_FIRMWARE(CNA_FW_FILE_CT);
1bf9fd70 3859MODULE_FIRMWARE(CNA_FW_FILE_CT2);