/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */
#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/prefetch.h>
#include <linux/module.h>

#include "bnad.h"
#include "bna.h"
#include "cna.h"

static DEFINE_MUTEX(bnad_fwimg_mutex);

/*
 * Module params
 */
static uint bnad_msix_disable;
module_param(bnad_msix_disable, uint, 0444);
MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");

static uint bnad_ioc_auto_recover = 1;
module_param(bnad_ioc_auto_recover, uint, 0444);
MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");

/*
 * Global variables
 */
u32 bnad_rxqs_per_cq = 2;

static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

/*
 * Local MACROS
 */
#define BNAD_TX_UNMAPQ_DEPTH	(bnad->txq_depth * 2)

#define BNAD_RX_UNMAPQ_DEPTH	(bnad->rxq_depth)

#define BNAD_GET_MBOX_IRQ(_bnad)				\
	(((_bnad)->cfg_flags & BNAD_CF_MSIX) ?			\
	 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
	 ((_bnad)->pcidev->irq))

#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth)	\
do {								\
	(_res_info)->res_type = BNA_RES_T_MEM;			\
	(_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;	\
	(_res_info)->res_u.mem_info.num = (_num);		\
	(_res_info)->res_u.mem_info.len =			\
		sizeof(struct bnad_unmap_q) +			\
		(sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \
} while (0)
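
/*
 * Note: BNAD_FILL_UNMAPQ_MEM_REQ sizes the request as the base
 * struct bnad_unmap_q plus (_depth - 1) extra unmap entries; this
 * assumes struct bnad_unmap_q (in bnad.h) ends in a one-element
 * array of struct bnad_skb_unmap that overlays the first entry.
 */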

#define BNAD_TXRX_SYNC_MDELAY	250	/* 250 msecs */

/*
 * Reinitialize completions in CQ, once Rx is taken down
 */
static void
bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bna_cq_entry *cmpl, *next_cmpl;
	unsigned int wi_range, wis = 0, ccb_prod = 0;
	int i;

	BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
			    wi_range);

	for (i = 0; i < ccb->q_depth; i++) {
		wis++;
		if (likely(--wi_range))
			next_cmpl = cmpl + 1;
		else {
			BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
			wis = 0;
			BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
					    next_cmpl, wi_range);
		}
		cmpl->valid = 0;
		cmpl = next_cmpl;
	}
}
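
/*
 * Unmap a transmitted skb from the Tx unmap array: the linear head is
 * released with dma_unmap_single() and each page fragment with
 * dma_unmap_page() (matching how the transmit path is expected to have
 * mapped them). Returns the unmap index advanced past all entries
 * consumed by this skb.
 */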
static u32
bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array,
		   u32 index, u32 depth, struct sk_buff *skb, u32 frag)
{
	int j;
	array[index].skb = NULL;

	dma_unmap_single(pdev, dma_unmap_addr(&array[index], dma_addr),
			 skb_headlen(skb), DMA_TO_DEVICE);
	dma_unmap_addr_set(&array[index], dma_addr, 0);
	BNA_QE_INDX_ADD(index, 1, depth);

	for (j = 0; j < frag; j++) {
		dma_unmap_page(pdev, dma_unmap_addr(&array[index], dma_addr),
			       skb_frag_size(&skb_shinfo(skb)->frags[j]),
			       DMA_TO_DEVICE);
		dma_unmap_addr_set(&array[index], dma_addr, 0);
		BNA_QE_INDX_ADD(index, 1, depth);
	}

	return index;
}

/*
 * Frees all pending Tx Bufs
 * At this point no activity is expected on the Q,
 * so DMA unmap & freeing is fine.
 */
static void
bnad_free_all_txbufs(struct bnad *bnad,
		     struct bna_tcb *tcb)
{
	u32 unmap_cons;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb = NULL;
	int q;

	unmap_array = unmap_q->unmap_array;

	for (q = 0; q < unmap_q->q_depth; q++) {
		skb = unmap_array[q].skb;
		if (!skb)
			continue;

		unmap_cons = q;
		unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
				unmap_cons, unmap_q->q_depth, skb,
				skb_shinfo(skb)->nr_frags);

		dev_kfree_skb_any(skb);
	}
}

/* Data Path Handlers */

/*
 * bnad_free_txbufs : Frees the Tx bufs on Tx completion
 * Can be called in a) Interrupt context
 *		    b) Sending context
 *		    c) Tasklet context
 */
static u32
bnad_free_txbufs(struct bnad *bnad,
		 struct bna_tcb *tcb)
{
	u32 unmap_cons, sent_packets = 0, sent_bytes = 0;
	u16 wis, updated_hw_cons;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb;

	/*
	 * Just return if TX is stopped. This check is useful
	 * when bnad_free_txbufs() runs from a tasklet that was
	 * scheduled before bnad_cb_tx_cleanup() cleared the
	 * BNAD_TXQ_TX_STARTED bit, but actually executes after
	 * the cleanup has run.
	 */
	if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
		return 0;

	updated_hw_cons = *(tcb->hw_consumer_index);

	wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
				 updated_hw_cons, tcb->q_depth);

	BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));

	unmap_array = unmap_q->unmap_array;
	unmap_cons = unmap_q->consumer_index;

	prefetch(&unmap_array[unmap_cons + 1]);
	while (wis) {
		skb = unmap_array[unmap_cons].skb;

		sent_packets++;
		sent_bytes += skb->len;
		wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);

		unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
				unmap_cons, unmap_q->q_depth, skb,
				skb_shinfo(skb)->nr_frags);

		dev_kfree_skb_any(skb);
	}

	/* Update consumer pointers. */
	tcb->consumer_index = updated_hw_cons;
	unmap_q->consumer_index = unmap_cons;

	tcb->txq->tx_packets += sent_packets;
	tcb->txq->tx_bytes += sent_bytes;

	return sent_packets;
}

/* Tx Free Tasklet function */
/* Frees buffers for all the tcb's in all the Tx's */
/*
 * Scheduled from the sending context, so that
 * the fat Tx lock is not held for too long
 * in the sending context.
 */
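/*
 * Concurrency note: BNAD_TXQ_FREE_SENT acts as a per-TxQ exclusion
 * flag, so only one of this tasklet and bnad_tx() reclaims buffers on
 * a given queue at a time; smp_mb__before_clear_bit() orders the frees
 * and index updates before the flag release.
 */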
static void
bnad_tx_free_tasklet(unsigned long bnad_ptr)
{
	struct bnad *bnad = (struct bnad *)bnad_ptr;
	struct bna_tcb *tcb;
	u32 acked = 0;
	int i, j;

	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			tcb = bnad->tx_info[i].tcb[j];
			if (!tcb)
				continue;
			if (((u16) (*tcb->hw_consumer_index) !=
				tcb->consumer_index) &&
				(!test_and_set_bit(BNAD_TXQ_FREE_SENT,
						   &tcb->flags))) {
				acked = bnad_free_txbufs(bnad, tcb);
				if (likely(test_bit(BNAD_TXQ_TX_STARTED,
					&tcb->flags)))
					bna_ib_ack(tcb->i_dbell, acked);
				smp_mb__before_clear_bit();
				clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
			}
			if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED,
						&tcb->flags)))
				continue;
			if (netif_queue_stopped(bnad->netdev)) {
				if (acked && netif_carrier_ok(bnad->netdev) &&
					BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
					BNAD_NETIF_WAKE_THRESHOLD) {
					netif_wake_queue(bnad->netdev);
					/* TODO */
					/* Counters for individual TxQs? */
					BNAD_UPDATE_CTR(bnad,
						netif_queue_wakeup);
				}
			}
		}
	}
}
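
/*
 * Common Tx-completion worker, shared by the MSIX Tx handler below and
 * the INTx path: reclaims sent buffers, wakes the netif queue when
 * enough ring entries are free, and acks the interrupt block.
 */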
static u32
bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct net_device *netdev = bnad->netdev;
	u32 sent = 0;

	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		return 0;

	sent = bnad_free_txbufs(bnad, tcb);
	if (sent) {
		if (netif_queue_stopped(netdev) &&
		    netif_carrier_ok(netdev) &&
		    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
		    BNAD_NETIF_WAKE_THRESHOLD) {
			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
				netif_wake_queue(netdev);
				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
			}
		}
	}

	if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
		bna_ib_ack(tcb->i_dbell, sent);

	smp_mb__before_clear_bit();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

	return sent;
}

/* MSIX Tx Completion Handler */
static irqreturn_t
bnad_msix_tx(int irq, void *data)
{
	struct bna_tcb *tcb = (struct bna_tcb *)data;
	struct bnad *bnad = tcb->bnad;

	bnad_tx(bnad, tcb);

	return IRQ_HANDLED;
}

static void
bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	rcb->producer_index = 0;
	rcb->consumer_index = 0;

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
}

static void
bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb;
	int unmap_cons;

	unmap_q = rcb->unmap_q;
	unmap_array = unmap_q->unmap_array;
	for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
		skb = unmap_array[unmap_cons].skb;
		if (!skb)
			continue;
		unmap_array[unmap_cons].skb = NULL;
		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr),
				 rcb->rxq->buffer_size,
				 DMA_FROM_DEVICE);
		dev_kfree_skb(skb);
	}
	bnad_reset_rcb(bnad, rcb);
}

static void
bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
{
	u16 to_alloc, alloced, unmap_prod, wi_range;
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct bna_rxq_entry *rxent;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	alloced = 0;
	to_alloc =
		BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);

	unmap_array = unmap_q->unmap_array;
	unmap_prod = unmap_q->producer_index;

	BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);

	while (to_alloc--) {
		if (!wi_range)
			BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
					     wi_range);
		skb = netdev_alloc_skb_ip_align(bnad->netdev,
						rcb->rxq->buffer_size);
		if (unlikely(!skb)) {
			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
			rcb->rxq->rxbuf_alloc_failed++;
			goto finishing;
		}
		unmap_array[unmap_prod].skb = skb;
		dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
					  rcb->rxq->buffer_size,
					  DMA_FROM_DEVICE);
		dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
				   dma_addr);
		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);

		rxent++;
		wi_range--;
		alloced++;
	}

finishing:
	if (likely(alloced)) {
		unmap_q->producer_index = unmap_prod;
		rcb->producer_index = unmap_prod;
		smp_mb();
		if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
			bna_rxq_prod_indx_doorbell(rcb);
	}
}
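
/*
 * bnad_refill_rxq() below uses BNAD_RXQ_REFILL as a single-refiller
 * flag so that only one context posts buffers to an RxQ at a time, and
 * only bothers when the free count is at least
 * (1 << BNAD_RXQ_REFILL_THRESHOLD_SHIFT) entries.
 */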
static inline void
bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
		if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
			>> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
			bnad_alloc_n_post_rxbufs(bnad, rcb);
		smp_mb__before_clear_bit();
		clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
	}
}
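
/*
 * NAPI poll workhorse: drains up to 'budget' completions from the CQ,
 * unmapping and passing each skb up the stack (via GRO when the
 * checksum was verified by hardware), then acks the doorbell and
 * refills both RCBs. BNAD_FP_IN_RX_PATH lets the cleanup path wait for
 * this function to leave the fast path.
 */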
static u32
bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
{
	struct bna_cq_entry *cmpl, *next_cmpl;
	struct bna_rcb *rcb = NULL;
	unsigned int wi_range, packets = 0, wis = 0;
	struct bnad_unmap_q *unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb;
	u32 flags, unmap_cons;
	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);

	set_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);

	if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)) {
		clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
		return 0;
	}

	prefetch(bnad->netdev);
	BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
			    wi_range);
	BUG_ON(!(wi_range <= ccb->q_depth));
	while (cmpl->valid && packets < budget) {
		packets++;
		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));

		if (bna_is_small_rxq(cmpl->rxq_id))
			rcb = ccb->rcb[1];
		else
			rcb = ccb->rcb[0];

		unmap_q = rcb->unmap_q;
		unmap_array = unmap_q->unmap_array;
		unmap_cons = unmap_q->consumer_index;

		skb = unmap_array[unmap_cons].skb;
		BUG_ON(!(skb));
		unmap_array[unmap_cons].skb = NULL;
		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr),
				 rcb->rxq->buffer_size,
				 DMA_FROM_DEVICE);
		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);

		/* Should be more efficient ? Performance ? */
		BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);

		wis++;
		if (likely(--wi_range))
			next_cmpl = cmpl + 1;
		else {
			BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
			wis = 0;
			BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
					    next_cmpl, wi_range);
			BUG_ON(!(wi_range <= ccb->q_depth));
		}
		prefetch(next_cmpl);

		flags = ntohl(cmpl->flags);
		if (unlikely
		    (flags &
		     (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
		      BNA_CQ_EF_TOO_LONG))) {
			dev_kfree_skb_any(skb);
			rcb->rxq->rx_packets_with_error++;
			goto next;
		}

		skb_put(skb, ntohs(cmpl->length));
		if (likely
		    ((bnad->netdev->features & NETIF_F_RXCSUM) &&
		     (((flags & BNA_CQ_EF_IPV4) &&
		       (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
		      (flags & BNA_CQ_EF_IPV6)) &&
		     (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
		     (flags & BNA_CQ_EF_L4_CKSUM_OK)))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		rcb->rxq->rx_packets++;
		rcb->rxq->rx_bytes += skb->len;
		skb->protocol = eth_type_trans(skb, bnad->netdev);

		if (flags & BNA_CQ_EF_VLAN)
			__vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag));

		if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			napi_gro_receive(&rx_ctrl->napi, skb);
		else
			netif_receive_skb(skb);

next:
		cmpl->valid = 0;
		cmpl = next_cmpl;
	}

	BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);

	if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
		bna_ib_ack_disable_irq(ccb->i_dbell, packets);

	bnad_refill_rxq(bnad, ccb->rcb[0]);
	if (ccb->rcb[1])
		bnad_refill_rxq(bnad, ccb->rcb[1]);

	clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);

	return packets;
}
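
/*
 * Schedule NAPI from interrupt context; napi_schedule_prep() fails if
 * the poll is already scheduled, which prevents double-scheduling.
 */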
static void
bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
	struct napi_struct *napi = &rx_ctrl->napi;

	if (likely(napi_schedule_prep(napi))) {
		__napi_schedule(napi);
		rx_ctrl->rx_schedule++;
	}
}

/* MSIX Rx Path Handler */
static irqreturn_t
bnad_msix_rx(int irq, void *data)
{
	struct bna_ccb *ccb = (struct bna_ccb *)data;

	if (ccb) {
		((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++;
		bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
	}

	return IRQ_HANDLED;
}

/* Interrupt handlers */

/* Mbox Interrupt Handlers */
static irqreturn_t
bnad_msix_mbox_handler(int irq, void *data)
{
	u32 intr_status;
	unsigned long flags;
	struct bnad *bnad = (struct bnad *)data;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_HANDLED;
	}

	bna_intr_status_get(&bnad->bna, intr_status);

	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return IRQ_HANDLED;
}
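
/*
 * INTx interrupt handler, shared between mailbox and data interrupts:
 * the mailbox part runs under bna_lock, then Tx completions are
 * processed inline and Rx work is deferred to NAPI.
 */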
static irqreturn_t
bnad_isr(int irq, void *data)
{
	int i, j;
	u32 intr_status;
	unsigned long flags;
	struct bnad *bnad = (struct bnad *)data;
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	struct bna_tcb *tcb = NULL;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_NONE;
	}

	bna_intr_status_get(&bnad->bna, intr_status);

	if (unlikely(!intr_status)) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_NONE;
	}

	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (!BNA_IS_INTX_DATA_INTR(intr_status))
		return IRQ_HANDLED;

	/* Process data interrupts */
	/* Tx processing */
	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			tcb = bnad->tx_info[i].tcb[j];
			if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
				bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
		}
	}
	/* Rx processing */
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			if (rx_ctrl->ccb)
				bnad_netif_rx_schedule_poll(bnad,
							    rx_ctrl->ccb);
		}
	}
	return IRQ_HANDLED;
}

/*
 * Called in interrupt / callback context
 * with bna_lock held, so cfg_flags access is OK
 */
static void
bnad_enable_mbox_irq(struct bnad *bnad)
{
	clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
}

/*
 * Called with bnad->bna_lock held because of
 * bnad->cfg_flags access.
 */
static void
bnad_disable_mbox_irq(struct bnad *bnad)
{
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
}

static void
bnad_set_netdev_perm_addr(struct bnad *bnad)
{
	struct net_device *netdev = bnad->netdev;

	memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
	if (is_zero_ether_addr(netdev->dev_addr))
		memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
}

/* Control Path Handlers */

/* Callbacks */
void
bnad_cb_mbox_intr_enable(struct bnad *bnad)
{
	bnad_enable_mbox_irq(bnad);
}

void
bnad_cb_mbox_intr_disable(struct bnad *bnad)
{
	bnad_disable_mbox_irq(bnad);
}

void
bnad_cb_ioceth_ready(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.ioc_comp);
}

void
bnad_cb_ioceth_failed(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
	complete(&bnad->bnad_completions.ioc_comp);
}

void
bnad_cb_ioceth_disabled(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.ioc_comp);
}

static void
bnad_cb_enet_disabled(void *arg)
{
	struct bnad *bnad = (struct bnad *)arg;

	netif_carrier_off(bnad->netdev);
	complete(&bnad->bnad_completions.enet_comp);
}

void
bnad_cb_ethport_link_status(struct bnad *bnad,
			    enum bna_link_status link_status)
{
	bool link_up = 0;

	link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);

	if (link_status == BNA_CEE_UP) {
		if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
			BNAD_UPDATE_CTR(bnad, cee_toggle);
		set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
	} else {
		if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
			BNAD_UPDATE_CTR(bnad, cee_toggle);
		clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
	}

	if (link_up) {
		if (!netif_carrier_ok(bnad->netdev)) {
			uint tx_id, tcb_id;
			printk(KERN_WARNING "bna: %s link up\n",
				bnad->netdev->name);
			netif_carrier_on(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
			for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
				for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
				     tcb_id++) {
					struct bna_tcb *tcb =
					bnad->tx_info[tx_id].tcb[tcb_id];
					u32 txq_id;
					if (!tcb)
						continue;

					txq_id = tcb->id;

					if (test_bit(BNAD_TXQ_TX_STARTED,
						     &tcb->flags)) {
						/*
						 * Force an immediate
						 * Transmit Schedule
						 */
						printk(KERN_INFO "bna: %s %d "
						      "TXQ_STARTED\n",
						       bnad->netdev->name,
						       txq_id);
						netif_wake_subqueue(
								bnad->netdev,
								txq_id);
						BNAD_UPDATE_CTR(bnad,
							netif_queue_wakeup);
					} else {
						netif_stop_subqueue(
								bnad->netdev,
								txq_id);
						BNAD_UPDATE_CTR(bnad,
							netif_queue_stop);
					}
				}
			}
		}
	} else {
		if (netif_carrier_ok(bnad->netdev)) {
			printk(KERN_WARNING "bna: %s link down\n",
				bnad->netdev->name);
			netif_carrier_off(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
		}
	}
}

static void
bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.tx_comp);
}

static void
bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;

	tx_info->tcb[tcb->id] = tcb;
	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
	unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
}

static void
bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;

	while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		cpu_relax();

	bnad_free_all_txbufs(bnad, tcb);

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;

	smp_mb__before_clear_bit();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

	tx_info->tcb[tcb->id] = NULL;
}

static void
bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
	unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
}

static void
bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
{
	bnad_free_all_rxbufs(bnad, rcb);
}

static void
bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_info *rx_info =
			(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = ccb;
	ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
}

static void
bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_info *rx_info =
			(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = NULL;
}

static void
bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tx->priv;
	struct bna_tcb *tcb;
	u32 txq_id;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
		txq_id = tcb->id;
		clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
		netif_stop_subqueue(bnad->netdev, txq_id);
		printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n",
			bnad->netdev->name, txq_id);
	}
}

static void
bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
	struct bna_tcb *tcb;
	struct bnad_unmap_q *unmap_q;
	u32 txq_id;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
		txq_id = tcb->id;

		unmap_q = tcb->unmap_q;

		if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
			continue;

		while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
			cpu_relax();

		bnad_free_all_txbufs(bnad, tcb);

		unmap_q->producer_index = 0;
		unmap_q->consumer_index = 0;

		smp_mb__before_clear_bit();
		clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

		set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);

		if (netif_carrier_ok(bnad->netdev)) {
			printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
				bnad->netdev->name, txq_id);
			netif_wake_subqueue(bnad->netdev, txq_id);
			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
		}
	}

	/*
	 * Workaround for the case where the first ioceth enable
	 * fails and we get a zero MAC address. We try to fetch the
	 * MAC address again here.
	 */
	if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
		bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr);
		bnad_set_netdev_perm_addr(bnad);
	}
}
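
/*
 * The fixed BNAD_TXRX_SYNC_MDELAY wait below gives any in-flight Tx
 * datapath handlers time to drain before cleanup is signalled
 * complete.
 */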
static void
bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
	struct bna_tcb *tcb;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
	}

	mdelay(BNAD_TXRX_SYNC_MDELAY);
	bna_tx_cleanup_complete(tx);
}

static void
bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
{
	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
	struct bna_ccb *ccb;
	struct bnad_rx_ctrl *rx_ctrl;
	int i;

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];
		ccb = rx_ctrl->ccb;
		if (!ccb)
			continue;

		clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);

		if (ccb->rcb[1])
			clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
	}
}

static void
bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
{
	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
	struct bna_ccb *ccb;
	struct bnad_rx_ctrl *rx_ctrl;
	int i;

	mdelay(BNAD_TXRX_SYNC_MDELAY);

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];
		ccb = rx_ctrl->ccb;
		if (!ccb)
			continue;

		clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);

		if (ccb->rcb[1])
			clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);

		while (test_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags))
			cpu_relax();
	}

	bna_rx_cleanup_complete(rx);
}

static void
bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
{
	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
	struct bna_ccb *ccb;
	struct bna_rcb *rcb;
	struct bnad_rx_ctrl *rx_ctrl;
	struct bnad_unmap_q *unmap_q;
	int i;
	int j;

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];
		ccb = rx_ctrl->ccb;
		if (!ccb)
			continue;

		bnad_cq_cmpl_init(bnad, ccb);

		for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
			rcb = ccb->rcb[j];
			if (!rcb)
				continue;
			bnad_free_all_rxbufs(bnad, rcb);

			set_bit(BNAD_RXQ_STARTED, &rcb->flags);
			set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
			unmap_q = rcb->unmap_q;

			/* Now allocate & post buffers for this RCB */
			/* !!Allocation in callback context */
			if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
				if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
					>> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
					bnad_alloc_n_post_rxbufs(bnad, rcb);
				smp_mb__before_clear_bit();
				clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
			}
		}
	}
}

static void
bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.rx_comp);
}

static void
bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
{
	bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.mcast_comp);
}

void
bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
		  struct bna_stats *stats)
{
	if (status == BNA_CB_SUCCESS)
		BNAD_UPDATE_CTR(bnad, hw_stats_updates);

	if (!netif_running(bnad->netdev) ||
		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		return;

	mod_timer(&bnad->stats_timer,
		  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
}

static void
bnad_cb_enet_mtu_set(struct bnad *bnad)
{
	bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.mtu_comp);
}

/* Resource allocation, free functions */

static void
bnad_mem_free(struct bnad *bnad,
	      struct bna_mem_info *mem_info)
{
	int i;
	dma_addr_t dma_pa;

	if (mem_info->mdl == NULL)
		return;

	for (i = 0; i < mem_info->num; i++) {
		if (mem_info->mdl[i].kva != NULL) {
			if (mem_info->mem_type == BNA_MEM_T_DMA) {
				BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
						dma_pa);
				dma_free_coherent(&bnad->pcidev->dev,
						  mem_info->mdl[i].len,
						  mem_info->mdl[i].kva, dma_pa);
			} else
				kfree(mem_info->mdl[i].kva);
		}
	}
	kfree(mem_info->mdl);
	mem_info->mdl = NULL;
}

static int
bnad_mem_alloc(struct bnad *bnad,
	       struct bna_mem_info *mem_info)
{
	int i;
	dma_addr_t dma_pa;

	if ((mem_info->num == 0) || (mem_info->len == 0)) {
		mem_info->mdl = NULL;
		return 0;
	}

	mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
				GFP_KERNEL);
	if (mem_info->mdl == NULL)
		return -ENOMEM;

	if (mem_info->mem_type == BNA_MEM_T_DMA) {
		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva =
				dma_alloc_coherent(&bnad->pcidev->dev,
						   mem_info->len, &dma_pa,
						   GFP_KERNEL);

			if (mem_info->mdl[i].kva == NULL)
				goto err_return;

			BNA_SET_DMA_ADDR(dma_pa,
					 &(mem_info->mdl[i].dma));
		}
	} else {
		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva = kzalloc(mem_info->len,
						       GFP_KERNEL);
			if (mem_info->mdl[i].kva == NULL)
				goto err_return;
		}
	}

	return 0;

err_return:
	bnad_mem_free(bnad, mem_info);
	return -ENOMEM;
}

/* Free IRQ for Mailbox */
static void
bnad_mbox_irq_free(struct bnad *bnad)
{
	int irq;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad_disable_mbox_irq(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	irq = BNAD_GET_MBOX_IRQ(bnad);
	free_irq(irq, bnad);
}

/*
 * Allocates the IRQ for the Mailbox, but keeps it disabled.
 * It will be enabled once we get the mbox enable callback
 * from bna.
 */
static int
bnad_mbox_irq_alloc(struct bnad *bnad)
{
	int err = 0;
	unsigned long irq_flags, flags;
	u32 irq;
	irq_handler_t irq_handler;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bnad->cfg_flags & BNAD_CF_MSIX) {
		irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
		irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
		irq_flags = 0;
	} else {
		irq_handler = (irq_handler_t)bnad_isr;
		irq = bnad->pcidev->irq;
		irq_flags = IRQF_SHARED;
	}

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);

	/*
	 * Set the Mbox IRQ disable flag, so that the IRQ handler
	 * called from request_irq() for SHARED IRQs does not execute.
	 */
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);

	err = request_irq(irq, irq_handler, irq_flags,
			  bnad->mbox_irq_name, bnad);

	return err;
}

static void
bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
{
	kfree(intr_info->idl);
	intr_info->idl = NULL;
}
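
/*
 * MSIX vector layout assumed by the descriptor list below: the mailbox
 * vector(s) come first, followed by one vector per TxQ, then one per
 * RxP, so a Tx/Rx object's vectors start at a fixed offset from
 * BNAD_MAILBOX_MSIX_VECTORS.
 */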
/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
static int
bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
		    u32 txrx_id, struct bna_intr_info *intr_info)
{
	int i, vector_start = 0;
	u32 cfg_flags;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	cfg_flags = bnad->cfg_flags;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (cfg_flags & BNAD_CF_MSIX) {
		intr_info->intr_type = BNA_INTR_T_MSIX;
		intr_info->idl = kcalloc(intr_info->num,
					 sizeof(struct bna_intr_descr),
					 GFP_KERNEL);
		if (!intr_info->idl)
			return -ENOMEM;

		switch (src) {
		case BNAD_INTR_TX:
			vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
			break;

		case BNAD_INTR_RX:
			vector_start = BNAD_MAILBOX_MSIX_VECTORS +
					(bnad->num_tx * bnad->num_txq_per_tx) +
					txrx_id;
			break;

		default:
			BUG();
		}

		for (i = 0; i < intr_info->num; i++)
			intr_info->idl[i].vector = vector_start + i;
	} else {
		intr_info->intr_type = BNA_INTR_T_INTX;
		intr_info->num = 1;
		intr_info->idl = kcalloc(intr_info->num,
					 sizeof(struct bna_intr_descr),
					 GFP_KERNEL);
		if (!intr_info->idl)
			return -ENOMEM;

		switch (src) {
		case BNAD_INTR_TX:
			intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
			break;

		case BNAD_INTR_RX:
			intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
			break;
		}
	}
	return 0;
}

/**
 * NOTE: Should be called for MSIX only
 * Unregisters Tx MSIX vector(s) from the kernel
 */
static void
bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
			int num_txqs)
{
	int i;
	int vector_num;

	for (i = 0; i < num_txqs; i++) {
		if (tx_info->tcb[i] == NULL)
			continue;

		vector_num = tx_info->tcb[i]->intr_vector;
		free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
	}
}

/**
 * NOTE: Should be called for MSIX only
 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
		      u32 tx_id, int num_txqs)
{
	int i;
	int err;
	int vector_num;

	for (i = 0; i < num_txqs; i++) {
		vector_num = tx_info->tcb[i]->intr_vector;
		sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
			tx_id + tx_info->tcb[i]->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_tx, 0,
				  tx_info->tcb[i]->name,
				  tx_info->tcb[i]);
		if (err)
			goto err_return;
	}

	return 0;

err_return:
	if (i > 0)
		bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
	return -1;
}

/**
 * NOTE: Should be called for MSIX only
 * Unregisters Rx MSIX vector(s) from the kernel
 */
static void
bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
			int num_rxps)
{
	int i;
	int vector_num;

	for (i = 0; i < num_rxps; i++) {
		if (rx_info->rx_ctrl[i].ccb == NULL)
			continue;

		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
		free_irq(bnad->msix_table[vector_num].vector,
			 rx_info->rx_ctrl[i].ccb);
	}
}

/**
 * NOTE: Should be called for MSIX only
 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
		      u32 rx_id, int num_rxps)
{
	int i;
	int err;
	int vector_num;

	for (i = 0; i < num_rxps; i++) {
		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
		sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
			bnad->netdev->name,
			rx_id + rx_info->rx_ctrl[i].ccb->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_rx, 0,
				  rx_info->rx_ctrl[i].ccb->name,
				  rx_info->rx_ctrl[i].ccb);
		if (err)
			goto err_return;
	}

	return 0;

err_return:
	if (i > 0)
		bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
	return -1;
}

/* Free Tx object Resources */
static void
bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
{
	int i;

	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
	}
}

/* Allocates memory and interrupt resources for Tx object */
static int
bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
		  u32 tx_id)
{
	int i, err = 0;

	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			err = bnad_mem_alloc(bnad,
					&res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
					&res_info[i].res_u.intr_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_tx_res_free(bnad, res_info);
	return err;
}

/* Free Rx object Resources */
static void
bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
{
	int i;

	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
	}
}

/* Allocates memory and interrupt resources for Rx object */
static int
bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
		  uint rx_id)
{
	int i, err = 0;

	/* All memory needs to be allocated before setup_ccbs */
	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			err = bnad_mem_alloc(bnad,
					&res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
					&res_info[i].res_u.intr_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_rx_res_free(bnad, res_info);
	return err;
}

/* Timer callbacks */
/* a) IOC timer */
static void
bnad_ioc_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_ioc_hb_check(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_iocpf_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_iocpf_sem_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * All timer routines use bnad->bna_lock to protect against
 * the following race, which may occur in case of no locking:
 *	Time	CPU m		CPU n
 *	0	1 = test_bit
 *	1			clear_bit
 *	2			del_timer_sync
 *	3	mod_timer
 */

/* b) Dynamic Interrupt Moderation Timer */
static void
bnad_dim_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	int i, j;
	unsigned long flags;

	if (!netif_carrier_ok(bnad->netdev))
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			if (!rx_ctrl->ccb)
				continue;
			bna_rx_dim_update(rx_ctrl->ccb);
		}
	}

	/* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
	if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/* c) Statistics Timer */
static void
bnad_stats_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	if (!netif_running(bnad->netdev) ||
		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_hw_stats_get(&bnad->bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * Set up timer for DIM
 * Called with bnad->bna_lock held
 */
void
bnad_dim_timer_start(struct bnad *bnad)
{
	if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
	    !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
		setup_timer(&bnad->dim_timer, bnad_dim_timeout,
			    (unsigned long)bnad);
		set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
	}
}

/*
 * Set up timer for statistics
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
static void
bnad_stats_timer_start(struct bnad *bnad)
{
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
		setup_timer(&bnad->stats_timer, bnad_stats_timeout,
			    (unsigned long)bnad);
		mod_timer(&bnad->stats_timer,
			  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * Stops the stats timer
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
static void
bnad_stats_timer_stop(struct bnad *bnad)
{
	int to_del = 0;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		to_del = 1;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (to_del)
		del_timer_sync(&bnad->stats_timer);
}

/* Utilities */

static void
bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
{
	int i = 1; /* Index 0 has broadcast address */
	struct netdev_hw_addr *mc_addr;

	netdev_for_each_mc_addr(mc_addr, netdev) {
		memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
		       ETH_ALEN);
		i++;
	}
}
1643 | static int | |
1644 | bnad_napi_poll_rx(struct napi_struct *napi, int budget) | |
1645 | { | |
1646 | struct bnad_rx_ctrl *rx_ctrl = | |
1647 | container_of(napi, struct bnad_rx_ctrl, napi); | |
2be67144 | 1648 | struct bnad *bnad = rx_ctrl->bnad; |
8b230ed8 RM |
1649 | int rcvd = 0; |
1650 | ||
271e8b79 | 1651 | rx_ctrl->rx_poll_ctr++; |
8b230ed8 RM |
1652 | |
1653 | if (!netif_carrier_ok(bnad->netdev)) | |
1654 | goto poll_exit; | |
1655 | ||
2be67144 | 1656 | rcvd = bnad_poll_cq(bnad, rx_ctrl->ccb, budget); |
271e8b79 | 1657 | if (rcvd >= budget) |
8b230ed8 RM |
1658 | return rcvd; |
1659 | ||
1660 | poll_exit: | |
19dbff9f | 1661 | napi_complete(napi); |
8b230ed8 | 1662 | |
271e8b79 | 1663 | rx_ctrl->rx_complete++; |
2be67144 RM |
1664 | |
1665 | if (rx_ctrl->ccb) | |
271e8b79 RM |
1666 | bnad_enable_rx_irq_unsafe(rx_ctrl->ccb); |
1667 | ||
8b230ed8 RM |
1668 | return rcvd; |
1669 | } | |
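/*
 * A minimal sketch of the NAPI poll contract bnad_napi_poll_rx() above
 * follows: consume at most 'budget' completions; if the budget was
 * exhausted, return it and stay scheduled; otherwise complete NAPI and
 * re-enable the device interrupt. struct my_rx, my_rx_clean() and
 * my_irq_enable() are hypothetical.
 */
#include <linux/netdevice.h>

struct my_rx {
	struct napi_struct napi;
	/* ... ring state ... */
};

static int my_rx_clean(struct my_rx *rx, int budget);	/* hypothetical */
static void my_irq_enable(struct my_rx *rx);		/* hypothetical */

static int my_napi_poll(struct napi_struct *napi, int budget)
{
	struct my_rx *rx = container_of(napi, struct my_rx, napi);
	int done = my_rx_clean(rx, budget);	/* packets processed */

	if (done >= budget)
		return done;	/* more work pending: NAPI re-polls us */

	napi_complete(napi);	/* no more work: leave polled mode */
	my_irq_enable(rx);	/* interrupts stayed off while polling */
	return done;
}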
1670 | ||
2be67144 | 1671 | #define BNAD_NAPI_POLL_QUOTA 64 |
8b230ed8 | 1672 | static void |
2be67144 | 1673 | bnad_napi_init(struct bnad *bnad, u32 rx_id) |
8b230ed8 | 1674 | { |
8b230ed8 RM |
1675 | struct bnad_rx_ctrl *rx_ctrl; |
1676 | int i; | |
8b230ed8 RM |
1677 | |
1678 | /* Initialize NAPI */ | |
1679 | for (i = 0; i < bnad->num_rxp_per_rx; i++) { | |
1680 | rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i]; | |
1681 | netif_napi_add(bnad->netdev, &rx_ctrl->napi, | |
2be67144 RM |
1682 | bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA); |
1683 | } | |
1684 | } | |
1685 | ||
1686 | static void | |
1687 | bnad_napi_enable(struct bnad *bnad, u32 rx_id) | |
1688 | { | |
1689 | struct bnad_rx_ctrl *rx_ctrl; | |
1690 | int i; | |
1691 | ||
1692 | /* Enable NAPI */ | |
1693 | for (i = 0; i < bnad->num_rxp_per_rx; i++) { | |
1694 | rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i]; | |
be7fa326 | 1695 | |
8b230ed8 RM |
1696 | napi_enable(&rx_ctrl->napi); |
1697 | } | |
1698 | } | |
1699 | ||
1700 | static void | |
1701 | bnad_napi_disable(struct bnad *bnad, u32 rx_id) | |
1702 | { | |
1703 | int i; | |
1704 | ||
1705 | /* First disable and then clean up */ | |
1706 | for (i = 0; i < bnad->num_rxp_per_rx; i++) { | |
1707 | napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi); | |
1708 | netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi); | |
1709 | } | |
1710 | } | |
1711 | ||
1712 | /* Should be called with conf_lock held */ | |
1713 | void | |
078086f3 | 1714 | bnad_cleanup_tx(struct bnad *bnad, u32 tx_id) |
8b230ed8 RM |
1715 | { |
1716 | struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id]; | |
1717 | struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0]; | |
1718 | unsigned long flags; | |
1719 | ||
1720 | if (!tx_info->tx) | |
1721 | return; | |
1722 | ||
1723 | init_completion(&bnad->bnad_completions.tx_comp); | |
1724 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
1725 | bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled); | |
1726 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
1727 | wait_for_completion(&bnad->bnad_completions.tx_comp); | |
1728 | ||
1729 | if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX) | |
1730 | bnad_tx_msix_unregister(bnad, tx_info, | |
1731 | bnad->num_txq_per_tx); | |
1732 | ||
2be67144 RM |
1733 | if (0 == tx_id) |
1734 | tasklet_kill(&bnad->tx_free_tasklet); | |
1735 | ||
8b230ed8 RM |
1736 | spin_lock_irqsave(&bnad->bna_lock, flags); |
1737 | bna_tx_destroy(tx_info->tx); | |
1738 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
1739 | ||
1740 | tx_info->tx = NULL; | |
078086f3 | 1741 | tx_info->tx_id = 0; |
8b230ed8 | 1742 | |
8b230ed8 RM |
1743 | bnad_tx_res_free(bnad, res_info); |
1744 | } | |
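/*
 * A hedged sketch of the completion handshake bnad_cleanup_tx() uses
 * above: re-initialize a completion, fire an asynchronous disable that
 * takes a callback, and sleep until the callback signals. The my_*
 * names are hypothetical; wait_for_completion() sleeps, hence the
 * conf_mutex (process context) requirement.
 */
#include <linux/completion.h>

static struct completion my_disable_comp;

static void my_start_async_disable(void (*cb)(void *));	/* hypothetical */

static void my_disabled_cb(void *arg)	/* runs from the async path */
{
	complete(&my_disable_comp);
}

static void my_disable_and_wait(void)
{
	init_completion(&my_disable_comp);	/* re-arm before each use */
	my_start_async_disable(my_disabled_cb);
	wait_for_completion(&my_disable_comp);
}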
1745 | ||
1746 | /* Should be called with conf_lock held */ | |
1747 | int | |
078086f3 | 1748 | bnad_setup_tx(struct bnad *bnad, u32 tx_id) |
8b230ed8 RM |
1749 | { |
1750 | int err; | |
1751 | struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id]; | |
1752 | struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0]; | |
1753 | struct bna_intr_info *intr_info = | |
1754 | &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info; | |
1755 | struct bna_tx_config *tx_config = &bnad->tx_config[tx_id]; | |
d91d25d5 | 1756 | static const struct bna_tx_event_cbfn tx_cbfn = { |
1757 | .tcb_setup_cbfn = bnad_cb_tcb_setup, | |
1758 | .tcb_destroy_cbfn = bnad_cb_tcb_destroy, | |
1759 | .tx_stall_cbfn = bnad_cb_tx_stall, | |
1760 | .tx_resume_cbfn = bnad_cb_tx_resume, | |
1761 | .tx_cleanup_cbfn = bnad_cb_tx_cleanup, | |
1762 | }; | |
1763 | ||
8b230ed8 RM |
1764 | struct bna_tx *tx; |
1765 | unsigned long flags; | |
1766 | ||
078086f3 RM |
1767 | tx_info->tx_id = tx_id; |
1768 | ||
8b230ed8 RM |
1769 | /* Initialize the Tx object configuration */ |
1770 | tx_config->num_txq = bnad->num_txq_per_tx; | |
1771 | tx_config->txq_depth = bnad->txq_depth; | |
1772 | tx_config->tx_type = BNA_TX_T_REGULAR; | |
078086f3 | 1773 | tx_config->coalescing_timeo = bnad->tx_coalescing_timeo; |
8b230ed8 | 1774 | |
8b230ed8 RM |
1775 | /* Get BNA's resource requirement for one tx object */ |
1776 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
1777 | bna_tx_res_req(bnad->num_txq_per_tx, | |
1778 | bnad->txq_depth, res_info); | |
1779 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
1780 | ||
1781 | /* Fill Unmap Q memory requirements */ | |
1782 | BNAD_FILL_UNMAPQ_MEM_REQ( | |
1783 | &res_info[BNA_TX_RES_MEM_T_UNMAPQ], | |
1784 | bnad->num_txq_per_tx, | |
1785 | BNAD_TX_UNMAPQ_DEPTH); | |
1786 | ||
1787 | /* Allocate resources */ | |
1788 | err = bnad_tx_res_alloc(bnad, res_info, tx_id); | |
1789 | if (err) | |
1790 | return err; | |
1791 | ||
1792 | /* Ask BNA to create one Tx object, supplying required resources */ | |
1793 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
1794 | tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info, | |
1795 | tx_info); | |
1796 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
1797 | if (!tx) | |
1798 | goto err_return; | |
1799 | tx_info->tx = tx; | |
1800 | ||
1801 | /* Register ISR for the Tx object */ | |
1802 | if (intr_info->intr_type == BNA_INTR_T_MSIX) { | |
1803 | err = bnad_tx_msix_register(bnad, tx_info, | |
1804 | tx_id, bnad->num_txq_per_tx); | |
1805 | if (err) | |
1806 | goto err_return; | |
1807 | } | |
1808 | ||
1809 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
1810 | bna_tx_enable(tx); | |
1811 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
1812 | ||
1813 | return 0; | |
1814 | ||
1815 | err_return: | |
1816 | bnad_tx_res_free(bnad, res_info); | |
1817 | return err; | |
1818 | } | |
1819 | ||
1820 | /* Setup the rx config for bna_rx_create */ | |
1821 | /* bnad decides the configuration */ | |
1822 | static void | |
1823 | bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config) | |
1824 | { | |
1825 | rx_config->rx_type = BNA_RX_T_REGULAR; | |
1826 | rx_config->num_paths = bnad->num_rxp_per_rx; | |
078086f3 | 1827 | rx_config->coalescing_timeo = bnad->rx_coalescing_timeo; |
8b230ed8 RM |
1828 | |
1829 | if (bnad->num_rxp_per_rx > 1) { | |
1830 | rx_config->rss_status = BNA_STATUS_T_ENABLED; | |
1831 | rx_config->rss_config.hash_type = | |
078086f3 RM |
1832 | (BFI_ENET_RSS_IPV6 | |
1833 | BFI_ENET_RSS_IPV6_TCP | | |
1834 | BFI_ENET_RSS_IPV4 | | |
1835 | BFI_ENET_RSS_IPV4_TCP); | |
8b230ed8 RM |
1836 | rx_config->rss_config.hash_mask = |
1837 | bnad->num_rxp_per_rx - 1; | |
1838 | get_random_bytes(rx_config->rss_config.toeplitz_hash_key, | |
1839 | sizeof(rx_config->rss_config.toeplitz_hash_key)); | |
1840 | } else { | |
1841 | rx_config->rss_status = BNA_STATUS_T_DISABLED; | |
1842 | memset(&rx_config->rss_config, 0, | |
1843 | sizeof(rx_config->rss_config)); | |
1844 | } | |
1845 | rx_config->rxp_type = BNA_RXP_SLR; | |
1846 | rx_config->q_depth = bnad->rxq_depth; | |
1847 | ||
1848 | rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE; | |
1849 | ||
1850 | rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED; | |
1851 | } | |
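/*
 * A short sketch of why the RSS setup above uses hash_mask =
 * num_rxp_per_rx - 1: conceptually, queue selection masks the Toeplitz
 * hash, which spreads uniformly only when the path count is a power of
 * two (e.g. 4 paths -> mask 0x3). my_rss_pick_queue() is hypothetical,
 * a simplification of what the hardware does with the mask.
 */
#include <linux/types.h>

static inline u32 my_rss_pick_queue(u32 toeplitz_hash, u32 num_paths)
{
	/* with num_paths == 4: hash & 0x3 selects CQ 0..3 */
	return toeplitz_hash & (num_paths - 1);
}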
1852 | ||
2be67144 RM |
1853 | static void |
1854 | bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id) | |
1855 | { | |
1856 | struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id]; | |
1857 | int i; | |
1858 | ||
1859 | for (i = 0; i < bnad->num_rxp_per_rx; i++) | |
1860 | rx_info->rx_ctrl[i].bnad = bnad; | |
1861 | } | |
1862 | ||
8b230ed8 RM |
1863 | /* Called with mutex_lock(&bnad->conf_mutex) held */ |
1864 | void | |
078086f3 | 1865 | bnad_cleanup_rx(struct bnad *bnad, u32 rx_id) |
8b230ed8 RM |
1866 | { |
1867 | struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id]; | |
1868 | struct bna_rx_config *rx_config = &bnad->rx_config[rx_id]; | |
1869 | struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0]; | |
1870 | unsigned long flags; | |
271e8b79 | 1871 | int to_del = 0; |
8b230ed8 RM |
1872 | |
1873 | if (!rx_info->rx) | |
1874 | return; | |
1875 | ||
1876 | if (0 == rx_id) { | |
1877 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
271e8b79 RM |
1878 | if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED && |
1879 | test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) { | |
8b230ed8 | 1880 | clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags); |
271e8b79 RM |
1881 | to_del = 1; |
1882 | } | |
8b230ed8 | 1883 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
271e8b79 | 1884 | if (to_del) |
8b230ed8 RM |
1885 | del_timer_sync(&bnad->dim_timer); |
1886 | } | |
1887 | ||
8b230ed8 RM |
1888 | init_completion(&bnad->bnad_completions.rx_comp); |
1889 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
1890 | bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled); | |
1891 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
1892 | wait_for_completion(&bnad->bnad_completions.rx_comp); | |
1893 | ||
1894 | if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX) | |
1895 | bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths); | |
1896 | ||
2be67144 RM |
1897 | bnad_napi_disable(bnad, rx_id); |
1898 | ||
8b230ed8 RM |
1899 | spin_lock_irqsave(&bnad->bna_lock, flags); |
1900 | bna_rx_destroy(rx_info->rx); | |
8b230ed8 RM |
1901 | |
1902 | rx_info->rx = NULL; | |
3caa1e95 | 1903 | rx_info->rx_id = 0; |
b9fa1fbf | 1904 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
8b230ed8 RM |
1905 | |
1906 | bnad_rx_res_free(bnad, res_info); | |
1907 | } | |
1908 | ||
1909 | /* Called with mutex_lock(&bnad->conf_mutex) held */ | |
1910 | int | |
078086f3 | 1911 | bnad_setup_rx(struct bnad *bnad, u32 rx_id) |
8b230ed8 RM |
1912 | { |
1913 | int err; | |
1914 | struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id]; | |
1915 | struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0]; | |
1916 | struct bna_intr_info *intr_info = | |
1917 | &res_info[BNA_RX_RES_T_INTR].res_u.intr_info; | |
1918 | struct bna_rx_config *rx_config = &bnad->rx_config[rx_id]; | |
d91d25d5 | 1919 | static const struct bna_rx_event_cbfn rx_cbfn = { |
1920 | .rcb_setup_cbfn = bnad_cb_rcb_setup, | |
1921 | .rcb_destroy_cbfn = bnad_cb_rcb_destroy, | |
1922 | .ccb_setup_cbfn = bnad_cb_ccb_setup, | |
1923 | .ccb_destroy_cbfn = bnad_cb_ccb_destroy, | |
5bcf6ac0 | 1924 | .rx_stall_cbfn = bnad_cb_rx_stall, |
d91d25d5 | 1925 | .rx_cleanup_cbfn = bnad_cb_rx_cleanup, |
1926 | .rx_post_cbfn = bnad_cb_rx_post, | |
1927 | }; | |
8b230ed8 RM |
1928 | struct bna_rx *rx; |
1929 | unsigned long flags; | |
1930 | ||
078086f3 RM |
1931 | rx_info->rx_id = rx_id; |
1932 | ||
8b230ed8 RM |
1933 | /* Initialize the Rx object configuration */ |
1934 | bnad_init_rx_config(bnad, rx_config); | |
1935 | ||
8b230ed8 RM |
1936 | /* Get BNA's resource requirement for one Rx object */ |
1937 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
1938 | bna_rx_res_req(rx_config, res_info); | |
1939 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
1940 | ||
1941 | /* Fill Unmap Q memory requirements */ | |
1942 | BNAD_FILL_UNMAPQ_MEM_REQ( | |
1943 | &res_info[BNA_RX_RES_MEM_T_UNMAPQ], | |
1944 | rx_config->num_paths + | |
1945 | ((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 : | |
1946 | rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH); | |
1947 | ||
1948 | /* Allocate resource */ | |
1949 | err = bnad_rx_res_alloc(bnad, res_info, rx_id); | |
1950 | if (err) | |
1951 | return err; | |
1952 | ||
2be67144 RM |
1953 | bnad_rx_ctrl_init(bnad, rx_id); |
1954 | ||
8b230ed8 RM |
1955 | /* Ask BNA to create one Rx object, supplying required resources */ |
1956 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
1957 | rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info, | |
1958 | rx_info); | |
3caa1e95 RM |
1959 | if (!rx) { |
1960 | err = -ENOMEM; | |
b9fa1fbf | 1961 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
8b230ed8 | 1962 | goto err_return; |
3caa1e95 | 1963 | } |
8b230ed8 | 1964 | rx_info->rx = rx; |
b9fa1fbf | 1965 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
8b230ed8 | 1966 | |
2be67144 RM |
1967 | /* |
1968 | * Init NAPI, which leaves the instance in NAPI_STATE_SCHED so | |
1969 | * that the IRQ handler cannot schedule NAPI at this point. | |
1970 | */ | |
1971 | bnad_napi_init(bnad, rx_id); | |
1972 | ||
8b230ed8 RM |
1973 | /* Register ISR for the Rx object */ |
1974 | if (intr_info->intr_type == BNA_INTR_T_MSIX) { | |
1975 | err = bnad_rx_msix_register(bnad, rx_info, rx_id, | |
1976 | rx_config->num_paths); | |
1977 | if (err) | |
1978 | goto err_return; | |
1979 | } | |
1980 | ||
8b230ed8 RM |
1981 | spin_lock_irqsave(&bnad->bna_lock, flags); |
1982 | if (0 == rx_id) { | |
1983 | /* Set up Dynamic Interrupt Moderation Vector */ | |
1984 | if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) | |
1985 | bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector); | |
1986 | ||
1987 | /* Enable VLAN filtering only on the default Rx */ | |
1988 | bna_rx_vlanfilter_enable(rx); | |
1989 | ||
1990 | /* Start the DIM timer */ | |
1991 | bnad_dim_timer_start(bnad); | |
1992 | } | |
1993 | ||
1994 | bna_rx_enable(rx); | |
1995 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
1996 | ||
2be67144 RM |
1997 | /* Enable scheduling of NAPI */ |
1998 | bnad_napi_enable(bnad, rx_id); | |
1999 | ||
8b230ed8 RM |
2000 | return 0; |
2001 | ||
2002 | err_return: | |
2003 | bnad_cleanup_rx(bnad, rx_id); | |
2004 | return err; | |
2005 | } | |
2006 | ||
2007 | /* Called with conf_lock & bnad->bna_lock held */ | |
2008 | void | |
2009 | bnad_tx_coalescing_timeo_set(struct bnad *bnad) | |
2010 | { | |
2011 | struct bnad_tx_info *tx_info; | |
2012 | ||
2013 | tx_info = &bnad->tx_info[0]; | |
2014 | if (!tx_info->tx) | |
2015 | return; | |
2016 | ||
2017 | bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo); | |
2018 | } | |
2019 | ||
2020 | /* Called with conf_lock & bnad->bna_lock held */ | |
2021 | void | |
2022 | bnad_rx_coalescing_timeo_set(struct bnad *bnad) | |
2023 | { | |
2024 | struct bnad_rx_info *rx_info; | |
0120b99c | 2025 | int i; |
8b230ed8 RM |
2026 | |
2027 | for (i = 0; i < bnad->num_rx; i++) { | |
2028 | rx_info = &bnad->rx_info[i]; | |
2029 | if (!rx_info->rx) | |
2030 | continue; | |
2031 | bna_rx_coalescing_timeo_set(rx_info->rx, | |
2032 | bnad->rx_coalescing_timeo); | |
2033 | } | |
2034 | } | |
2035 | ||
2036 | /* | |
2037 | * Called with bnad->bna_lock held | |
2038 | */ | |
a2122d95 | 2039 | int |
8b230ed8 RM |
2040 | bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr) |
2041 | { | |
2042 | int ret; | |
2043 | ||
2044 | if (!is_valid_ether_addr(mac_addr)) | |
2045 | return -EADDRNOTAVAIL; | |
2046 | ||
2047 | /* If datapath is down, pretend everything went through */ | |
2048 | if (!bnad->rx_info[0].rx) | |
2049 | return 0; | |
2050 | ||
2051 | ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL); | |
2052 | if (ret != BNA_CB_SUCCESS) | |
2053 | return -EADDRNOTAVAIL; | |
2054 | ||
2055 | return 0; | |
2056 | } | |
2057 | ||
2058 | /* Should be called with conf_lock held */ | |
a2122d95 | 2059 | int |
8b230ed8 RM |
2060 | bnad_enable_default_bcast(struct bnad *bnad) |
2061 | { | |
2062 | struct bnad_rx_info *rx_info = &bnad->rx_info[0]; | |
2063 | int ret; | |
2064 | unsigned long flags; | |
2065 | ||
2066 | init_completion(&bnad->bnad_completions.mcast_comp); | |
2067 | ||
2068 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
2069 | ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr, | |
2070 | bnad_cb_rx_mcast_add); | |
2071 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
2072 | ||
2073 | if (ret == BNA_CB_SUCCESS) | |
2074 | wait_for_completion(&bnad->bnad_completions.mcast_comp); | |
2075 | else | |
2076 | return -ENODEV; | |
2077 | ||
2078 | if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS) | |
2079 | return -ENODEV; | |
2080 | ||
2081 | return 0; | |
2082 | } | |
2083 | ||
19dbff9f | 2084 | /* Called with mutex_lock(&bnad->conf_mutex) held */ |
a2122d95 | 2085 | void |
aad75b66 RM |
2086 | bnad_restore_vlans(struct bnad *bnad, u32 rx_id) |
2087 | { | |
f859d7cb | 2088 | u16 vid; |
aad75b66 RM |
2089 | unsigned long flags; |
2090 | ||
f859d7cb | 2091 | for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) { |
aad75b66 | 2092 | spin_lock_irqsave(&bnad->bna_lock, flags); |
f859d7cb | 2093 | bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid); |
aad75b66 RM |
2094 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
2095 | } | |
2096 | } | |
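/*
 * A minimal sketch of the active_vlans bookkeeping that
 * bnad_restore_vlans() above replays: the add/kill entry points keep a
 * VLAN_N_VID-bit bitmap in step with the hardware filter, so after a
 * reset every set bit can be re-programmed with for_each_set_bit().
 * The my_* names and my_hw_vlan_add() are hypothetical.
 */
#include <linux/if_vlan.h>
#include <linux/bitops.h>

static DECLARE_BITMAP(my_active_vlans, VLAN_N_VID);

static void my_hw_vlan_add(u16 vid);	/* hypothetical filter write */

static void my_vlan_add(u16 vid)
{
	set_bit(vid, my_active_vlans);	/* remember for later restore */
	my_hw_vlan_add(vid);
}

static void my_vlan_restore(void)
{
	u16 vid;

	for_each_set_bit(vid, my_active_vlans, VLAN_N_VID)
		my_hw_vlan_add(vid);	/* replay the software bitmap */
}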
2097 | ||
8b230ed8 RM |
2098 | /* Statistics utilities */ |
2099 | void | |
250e061e | 2100 | bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats) |
8b230ed8 | 2101 | { |
8b230ed8 RM |
2102 | int i, j; |
2103 | ||
2104 | for (i = 0; i < bnad->num_rx; i++) { | |
2105 | for (j = 0; j < bnad->num_rxp_per_rx; j++) { | |
2106 | if (bnad->rx_info[i].rx_ctrl[j].ccb) { | |
250e061e | 2107 | stats->rx_packets += bnad->rx_info[i]. |
8b230ed8 | 2108 | rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets; |
250e061e | 2109 | stats->rx_bytes += bnad->rx_info[i]. |
8b230ed8 RM |
2110 | rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes; |
2111 | if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] && | |
2112 | bnad->rx_info[i].rx_ctrl[j].ccb-> | |
2113 | rcb[1]->rxq) { | |
250e061e | 2114 | stats->rx_packets += |
8b230ed8 RM |
2115 | bnad->rx_info[i].rx_ctrl[j]. |
2116 | ccb->rcb[1]->rxq->rx_packets; | |
250e061e | 2117 | stats->rx_bytes += |
8b230ed8 RM |
2118 | bnad->rx_info[i].rx_ctrl[j]. |
2119 | ccb->rcb[1]->rxq->rx_bytes; | |
2120 | } | |
2121 | } | |
2122 | } | |
2123 | } | |
2124 | for (i = 0; i < bnad->num_tx; i++) { | |
2125 | for (j = 0; j < bnad->num_txq_per_tx; j++) { | |
2126 | if (bnad->tx_info[i].tcb[j]) { | |
250e061e | 2127 | stats->tx_packets += |
8b230ed8 | 2128 | bnad->tx_info[i].tcb[j]->txq->tx_packets; |
250e061e | 2129 | stats->tx_bytes += |
8b230ed8 RM |
2130 | bnad->tx_info[i].tcb[j]->txq->tx_bytes; |
2131 | } | |
2132 | } | |
2133 | } | |
2134 | } | |
2135 | ||
2136 | /* | |
2137 | * Must be called with the bna_lock held. | |
2138 | */ | |
2139 | void | |
250e061e | 2140 | bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats) |
8b230ed8 | 2141 | { |
078086f3 RM |
2142 | struct bfi_enet_stats_mac *mac_stats; |
2143 | u32 bmap; | |
8b230ed8 RM |
2144 | int i; |
2145 | ||
078086f3 | 2146 | mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats; |
250e061e | 2147 | stats->rx_errors = |
8b230ed8 RM |
2148 | mac_stats->rx_fcs_error + mac_stats->rx_alignment_error + |
2149 | mac_stats->rx_frame_length_error + mac_stats->rx_code_error + | |
2150 | mac_stats->rx_undersize; | |
250e061e | 2151 | stats->tx_errors = mac_stats->tx_fcs_error + |
8b230ed8 | 2152 | mac_stats->tx_undersize; |
250e061e ED |
2153 | stats->rx_dropped = mac_stats->rx_drop; |
2154 | stats->tx_dropped = mac_stats->tx_drop; | |
2155 | stats->multicast = mac_stats->rx_multicast; | |
2156 | stats->collisions = mac_stats->tx_total_collision; | |
8b230ed8 | 2157 | |
250e061e | 2158 | stats->rx_length_errors = mac_stats->rx_frame_length_error; |
8b230ed8 RM |
2159 | |
2160 | /* receive ring buffer overflow ?? */ | |
2161 | ||
250e061e ED |
2162 | stats->rx_crc_errors = mac_stats->rx_fcs_error; |
2163 | stats->rx_frame_errors = mac_stats->rx_alignment_error; | |
8b230ed8 | 2164 | /* receiver FIFO overrun */ |
078086f3 RM |
2165 | bmap = bna_rx_rid_mask(&bnad->bna); |
2166 | for (i = 0; bmap; i++) { | |
8b230ed8 | 2167 | if (bmap & 1) { |
250e061e | 2168 | stats->rx_fifo_errors += |
8b230ed8 | 2169 | bnad->stats.bna_stats-> |
078086f3 | 2170 | hw_stats.rxf_stats[i].frame_drops; |
8b230ed8 RM |
2171 | break; |
2172 | } | |
2173 | bmap >>= 1; | |
2174 | } | |
2175 | } | |
2176 | ||
2177 | static void | |
2178 | bnad_mbox_irq_sync(struct bnad *bnad) | |
2179 | { | |
2180 | u32 irq; | |
2181 | unsigned long flags; | |
2182 | ||
2183 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
2184 | if (bnad->cfg_flags & BNAD_CF_MSIX) | |
8811e267 | 2185 | irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector; |
8b230ed8 RM |
2186 | else |
2187 | irq = bnad->pcidev->irq; | |
2188 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
2189 | ||
2190 | synchronize_irq(irq); | |
2191 | } | |
2192 | ||
2193 | /* Utility used by bnad_start_xmit, for doing TSO */ | |
2194 | static int | |
2195 | bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb) | |
2196 | { | |
2197 | int err; | |
2198 | ||
8b230ed8 RM |
2199 | if (skb_header_cloned(skb)) { |
2200 | err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); | |
2201 | if (err) { | |
2202 | BNAD_UPDATE_CTR(bnad, tso_err); | |
2203 | return err; | |
2204 | } | |
2205 | } | |
2206 | ||
2207 | /* | |
2208 | * For TSO, the TCP checksum field is seeded with pseudo-header sum | |
2209 | * excluding the length field. | |
2210 | */ | |
2211 | if (skb->protocol == htons(ETH_P_IP)) { | |
2212 | struct iphdr *iph = ip_hdr(skb); | |
2213 | ||
2214 | /* Do we really need these? */ | |
2215 | iph->tot_len = 0; | |
2216 | iph->check = 0; | |
2217 | ||
2218 | tcp_hdr(skb)->check = | |
2219 | ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, | |
2220 | IPPROTO_TCP, 0); | |
2221 | BNAD_UPDATE_CTR(bnad, tso4); | |
2222 | } else { | |
2223 | struct ipv6hdr *ipv6h = ipv6_hdr(skb); | |
2224 | ||
8b230ed8 RM |
2225 | ipv6h->payload_len = 0; |
2226 | tcp_hdr(skb)->check = | |
2227 | ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0, | |
2228 | IPPROTO_TCP, 0); | |
2229 | BNAD_UPDATE_CTR(bnad, tso6); | |
2230 | } | |
2231 | ||
2232 | return 0; | |
2233 | } | |
2234 | ||
2235 | /* | |
2236 | * Initialize Q numbers depending on Rx Paths | |
2237 | * Called with bnad->bna_lock held, because of cfg_flags | |
2238 | * access. | |
2239 | */ | |
2240 | static void | |
2241 | bnad_q_num_init(struct bnad *bnad) | |
2242 | { | |
2243 | int rxps; | |
2244 | ||
2245 | rxps = min((uint)num_online_cpus(), | |
772b5235 | 2246 | (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX)); |
8b230ed8 RM |
2247 | |
2248 | if (!(bnad->cfg_flags & BNAD_CF_MSIX)) | |
2249 | rxps = 1; /* INTx */ | |
2250 | ||
2251 | bnad->num_rx = 1; | |
2252 | bnad->num_tx = 1; | |
2253 | bnad->num_rxp_per_rx = rxps; | |
2254 | bnad->num_txq_per_tx = BNAD_TXQ_NUM; | |
2255 | } | |
2256 | ||
2257 | /* | |
2258 | * Adjusts the queue numbers, given the number of MSI-X vectors. | |
2259 | * Preference is given to RSS over Tx priority queues; | |
2260 | * in that case just one Tx queue is used. | |
2261 | * Called with bnad->bna_lock held because of cfg_flags access. | |
2262 | */ | |
2263 | static void | |
078086f3 | 2264 | bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp) |
8b230ed8 RM |
2265 | { |
2266 | bnad->num_txq_per_tx = 1; | |
2267 | if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) + | |
2268 | bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) && | |
2269 | (bnad->cfg_flags & BNAD_CF_MSIX)) { | |
2270 | bnad->num_rxp_per_rx = msix_vectors - | |
2271 | (bnad->num_tx * bnad->num_txq_per_tx) - | |
2272 | BNAD_MAILBOX_MSIX_VECTORS; | |
2273 | } else | |
2274 | bnad->num_rxp_per_rx = 1; | |
2275 | } | |
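/*
 * The vector budget in bnad_q_num_adjust() above, worked as a hedged
 * example: with 8 MSI-X vectors granted, 1 Tx object * 1 TxQ and 1
 * mailbox vector, the Rx paths get 8 - 1 - 1 = 6; with too few vectors
 * the Rx side degrades to a single path. my_rxp_from_vectors() is
 * hypothetical.
 */
static int my_rxp_from_vectors(int msix, int num_txq, int mbox_vectors)
{
	int rxp = msix - num_txq - mbox_vectors;

	return rxp >= 1 ? rxp : 1;	/* always keep one Rx path */
}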
2276 | ||
078086f3 RM |
2277 | /* Enable / disable ioceth */ |
2278 | static int | |
2279 | bnad_ioceth_disable(struct bnad *bnad) | |
8b230ed8 RM |
2280 | { |
2281 | unsigned long flags; | |
078086f3 | 2282 | int err = 0; |
8b230ed8 RM |
2283 | |
2284 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
078086f3 RM |
2285 | init_completion(&bnad->bnad_completions.ioc_comp); |
2286 | bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP); | |
8b230ed8 RM |
2287 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
2288 | ||
078086f3 RM |
2289 | wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp, |
2290 | msecs_to_jiffies(BNAD_IOCETH_TIMEOUT)); | |
2291 | ||
2292 | err = bnad->bnad_completions.ioc_comp_status; | |
2293 | return err; | |
8b230ed8 RM |
2294 | } |
2295 | ||
2296 | static int | |
078086f3 | 2297 | bnad_ioceth_enable(struct bnad *bnad) |
8b230ed8 RM |
2298 | { |
2299 | int err = 0; | |
2300 | unsigned long flags; | |
2301 | ||
8b230ed8 | 2302 | spin_lock_irqsave(&bnad->bna_lock, flags); |
078086f3 RM |
2303 | init_completion(&bnad->bnad_completions.ioc_comp); |
2304 | bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING; | |
2305 | bna_ioceth_enable(&bnad->bna.ioceth); | |
8b230ed8 RM |
2306 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
2307 | ||
078086f3 RM |
2308 | wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp, |
2309 | msecs_to_jiffies(BNAD_IOCETH_TIMEOUT)); | |
8b230ed8 | 2310 | |
078086f3 | 2311 | err = bnad->bnad_completions.ioc_comp_status; |
8b230ed8 RM |
2312 | |
2313 | return err; | |
2314 | } | |
2315 | ||
2316 | /* Free BNA resources */ | |
2317 | static void | |
078086f3 RM |
2318 | bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info, |
2319 | u32 res_val_max) | |
8b230ed8 RM |
2320 | { |
2321 | int i; | |
8b230ed8 | 2322 | |
078086f3 RM |
2323 | for (i = 0; i < res_val_max; i++) |
2324 | bnad_mem_free(bnad, &res_info[i].res_u.mem_info); | |
8b230ed8 RM |
2325 | } |
2326 | ||
2327 | /* Allocates memory and interrupt resources for BNA */ | |
2328 | static int | |
078086f3 RM |
2329 | bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info, |
2330 | u32 res_val_max) | |
8b230ed8 RM |
2331 | { |
2332 | int i, err; | |
8b230ed8 | 2333 | |
078086f3 RM |
2334 | for (i = 0; i < res_val_max; i++) { |
2335 | err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info); | |
8b230ed8 RM |
2336 | if (err) |
2337 | goto err_return; | |
2338 | } | |
2339 | return 0; | |
2340 | ||
2341 | err_return: | |
078086f3 | 2342 | bnad_res_free(bnad, res_info, res_val_max); |
8b230ed8 RM |
2343 | return err; |
2344 | } | |
2345 | ||
2346 | /* Interrupt enable / disable */ | |
2347 | static void | |
2348 | bnad_enable_msix(struct bnad *bnad) | |
2349 | { | |
2350 | int i, ret; | |
8b230ed8 RM |
2351 | unsigned long flags; |
2352 | ||
2353 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
2354 | if (!(bnad->cfg_flags & BNAD_CF_MSIX)) { | |
2355 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
2356 | return; | |
2357 | } | |
2358 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
2359 | ||
2360 | if (bnad->msix_table) | |
2361 | return; | |
2362 | ||
8b230ed8 | 2363 | bnad->msix_table = |
b7ee31c5 | 2364 | kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL); |
8b230ed8 RM |
2365 | |
2366 | if (!bnad->msix_table) | |
2367 | goto intx_mode; | |
2368 | ||
b7ee31c5 | 2369 | for (i = 0; i < bnad->msix_num; i++) |
8b230ed8 RM |
2370 | bnad->msix_table[i].entry = i; |
2371 | ||
b7ee31c5 | 2372 | ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num); |
8b230ed8 RM |
2373 | if (ret > 0) { |
2374 | /* Not enough MSI-X vectors. */ | |
19dbff9f RM |
2375 | pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n", |
2376 | ret, bnad->msix_num); | |
8b230ed8 RM |
2377 | |
2378 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
2379 | /* ret = #of vectors that we got */ | |
271e8b79 RM |
2380 | bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2, |
2381 | (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2); | |
8b230ed8 RM |
2382 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
2383 | ||
271e8b79 | 2384 | bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP + |
8b230ed8 | 2385 | BNAD_MAILBOX_MSIX_VECTORS; |
8b230ed8 | 2386 | |
078086f3 RM |
2387 | if (bnad->msix_num > ret) |
2388 | goto intx_mode; | |
2389 | ||
8b230ed8 RM |
2390 | /* Try once more with adjusted numbers */ |
2391 | /* If this fails, fall back to INTx */ | |
2392 | ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, | |
b7ee31c5 | 2393 | bnad->msix_num); |
8b230ed8 RM |
2394 | if (ret) |
2395 | goto intx_mode; | |
2396 | ||
2397 | } else if (ret < 0) | |
2398 | goto intx_mode; | |
078086f3 RM |
2399 | |
2400 | pci_intx(bnad->pcidev, 0); | |
2401 | ||
8b230ed8 RM |
2402 | return; |
2403 | ||
2404 | intx_mode: | |
19dbff9f | 2405 | pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n"); |
8b230ed8 RM |
2406 | |
2407 | kfree(bnad->msix_table); | |
2408 | bnad->msix_table = NULL; | |
2409 | bnad->msix_num = 0; | |
8b230ed8 RM |
2410 | spin_lock_irqsave(&bnad->bna_lock, flags); |
2411 | bnad->cfg_flags &= ~BNAD_CF_MSIX; | |
2412 | bnad_q_num_init(bnad); | |
2413 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
2414 | } | |
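/*
 * A hedged sketch of the pci_enable_msix() convention this function
 * relies on (the API of this kernel generation): a positive return is
 * the number of vectors the platform could grant, so shrink the request
 * and retry once; a negative return means fall back to INTx.
 */
#include <linux/pci.h>

static int my_enable_msix(struct pci_dev *pdev,
			  struct msix_entry *tbl, int want)
{
	int ret = pci_enable_msix(pdev, tbl, want);

	if (ret > 0)
		ret = pci_enable_msix(pdev, tbl, ret);	/* retry smaller */

	return ret;	/* 0 on success, negative -> use INTx */
}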
2415 | ||
2416 | static void | |
2417 | bnad_disable_msix(struct bnad *bnad) | |
2418 | { | |
2419 | u32 cfg_flags; | |
2420 | unsigned long flags; | |
2421 | ||
2422 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
2423 | cfg_flags = bnad->cfg_flags; | |
2424 | if (bnad->cfg_flags & BNAD_CF_MSIX) | |
2425 | bnad->cfg_flags &= ~BNAD_CF_MSIX; | |
2426 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
2427 | ||
2428 | if (cfg_flags & BNAD_CF_MSIX) { | |
2429 | pci_disable_msix(bnad->pcidev); | |
2430 | kfree(bnad->msix_table); | |
2431 | bnad->msix_table = NULL; | |
2432 | } | |
2433 | } | |
2434 | ||
2435 | /* Netdev entry points */ | |
2436 | static int | |
2437 | bnad_open(struct net_device *netdev) | |
2438 | { | |
2439 | int err; | |
2440 | struct bnad *bnad = netdev_priv(netdev); | |
2441 | struct bna_pause_config pause_config; | |
2442 | int mtu; | |
2443 | unsigned long flags; | |
2444 | ||
2445 | mutex_lock(&bnad->conf_mutex); | |
2446 | ||
2447 | /* Tx */ | |
2448 | err = bnad_setup_tx(bnad, 0); | |
2449 | if (err) | |
2450 | goto err_return; | |
2451 | ||
2452 | /* Rx */ | |
2453 | err = bnad_setup_rx(bnad, 0); | |
2454 | if (err) | |
2455 | goto cleanup_tx; | |
2456 | ||
2457 | /* Port */ | |
2458 | pause_config.tx_pause = 0; | |
2459 | pause_config.rx_pause = 0; | |
2460 | ||
078086f3 | 2461 | mtu = ETH_HLEN + VLAN_HLEN + bnad->netdev->mtu + ETH_FCS_LEN; |
8b230ed8 RM |
2462 | |
2463 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
078086f3 RM |
2464 | bna_enet_mtu_set(&bnad->bna.enet, mtu, NULL); |
2465 | bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL); | |
2466 | bna_enet_enable(&bnad->bna.enet); | |
8b230ed8 RM |
2467 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
2468 | ||
2469 | /* Enable broadcast */ | |
2470 | bnad_enable_default_bcast(bnad); | |
2471 | ||
aad75b66 RM |
2472 | /* Restore VLANs, if any */ |
2473 | bnad_restore_vlans(bnad, 0); | |
2474 | ||
8b230ed8 RM |
2475 | /* Set the UCAST address */ |
2476 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
2477 | bnad_mac_addr_set_locked(bnad, netdev->dev_addr); | |
2478 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
2479 | ||
2480 | /* Start the stats timer */ | |
2481 | bnad_stats_timer_start(bnad); | |
2482 | ||
2483 | mutex_unlock(&bnad->conf_mutex); | |
2484 | ||
2485 | return 0; | |
2486 | ||
2487 | cleanup_tx: | |
2488 | bnad_cleanup_tx(bnad, 0); | |
2489 | ||
2490 | err_return: | |
2491 | mutex_unlock(&bnad->conf_mutex); | |
2492 | return err; | |
2493 | } | |
2494 | ||
2495 | static int | |
2496 | bnad_stop(struct net_device *netdev) | |
2497 | { | |
2498 | struct bnad *bnad = netdev_priv(netdev); | |
2499 | unsigned long flags; | |
2500 | ||
2501 | mutex_lock(&bnad->conf_mutex); | |
2502 | ||
2503 | /* Stop the stats timer */ | |
2504 | bnad_stats_timer_stop(bnad); | |
2505 | ||
078086f3 | 2506 | init_completion(&bnad->bnad_completions.enet_comp); |
8b230ed8 RM |
2507 | |
2508 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
078086f3 RM |
2509 | bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP, |
2510 | bnad_cb_enet_disabled); | |
8b230ed8 RM |
2511 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
2512 | ||
078086f3 | 2513 | wait_for_completion(&bnad->bnad_completions.enet_comp); |
8b230ed8 RM |
2514 | |
2515 | bnad_cleanup_tx(bnad, 0); | |
2516 | bnad_cleanup_rx(bnad, 0); | |
2517 | ||
2518 | /* Synchronize mailbox IRQ */ | |
2519 | bnad_mbox_irq_sync(bnad); | |
2520 | ||
2521 | mutex_unlock(&bnad->conf_mutex); | |
2522 | ||
2523 | return 0; | |
2524 | } | |
2525 | ||
2526 | /* TX */ | |
2527 | /* | |
2528 | * bnad_start_xmit : Netdev entry point for Transmit | |
2529 | * Called under lock held by net_device | |
2530 | */ | |
2531 | static netdev_tx_t | |
2532 | bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) | |
2533 | { | |
2534 | struct bnad *bnad = netdev_priv(netdev); | |
078086f3 RM |
2535 | u32 txq_id = 0; |
2536 | struct bna_tcb *tcb = bnad->tx_info[0].tcb[txq_id]; | |
8b230ed8 | 2537 | |
0120b99c RM |
2538 | u16 txq_prod, vlan_tag = 0; |
2539 | u32 unmap_prod, wis, wis_used, wi_range; | |
2540 | u32 vectors, vect_id, i, acked; | |
0120b99c | 2541 | int err; |
271e8b79 RM |
2542 | unsigned int len; |
2543 | u32 gso_size; | |
8b230ed8 | 2544 | |
078086f3 | 2545 | struct bnad_unmap_q *unmap_q = tcb->unmap_q; |
0120b99c | 2546 | dma_addr_t dma_addr; |
8b230ed8 | 2547 | struct bna_txq_entry *txqent; |
078086f3 | 2548 | u16 flags; |
8b230ed8 | 2549 | |
271e8b79 RM |
2550 | if (unlikely(skb->len <= ETH_HLEN)) { |
2551 | dev_kfree_skb(skb); | |
2552 | BNAD_UPDATE_CTR(bnad, tx_skb_too_short); | |
2553 | return NETDEV_TX_OK; | |
2554 | } | |
2555 | if (unlikely(skb_headlen(skb) > BFI_TX_MAX_DATA_PER_VECTOR)) { | |
8b230ed8 | 2556 | dev_kfree_skb(skb); |
271e8b79 RM |
2557 | BNAD_UPDATE_CTR(bnad, tx_skb_headlen_too_long); |
2558 | return NETDEV_TX_OK; | |
2559 | } | |
2560 | if (unlikely(skb_headlen(skb) == 0)) { | |
2561 | dev_kfree_skb(skb); | |
2562 | BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero); | |
8b230ed8 RM |
2563 | return NETDEV_TX_OK; |
2564 | } | |
2565 | ||
2566 | /* | |
2567 | * Takes care of the Tx that is scheduled between clearing the flag | |
19dbff9f | 2568 | * and the netif_tx_stop_all_queues() call. |
8b230ed8 | 2569 | */ |
be7fa326 | 2570 | if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) { |
8b230ed8 | 2571 | dev_kfree_skb(skb); |
271e8b79 | 2572 | BNAD_UPDATE_CTR(bnad, tx_skb_stopping); |
8b230ed8 RM |
2573 | return NETDEV_TX_OK; |
2574 | } | |
2575 | ||
8b230ed8 | 2576 | vectors = 1 + skb_shinfo(skb)->nr_frags; |
271e8b79 | 2577 | if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) { |
8b230ed8 | 2578 | dev_kfree_skb(skb); |
271e8b79 | 2579 | BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors); |
8b230ed8 RM |
2580 | return NETDEV_TX_OK; |
2581 | } | |
2582 | wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */ | |
2583 | acked = 0; | |
078086f3 RM |
2584 | if (unlikely(wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) || |
2585 | vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) { | |
8b230ed8 RM |
2586 | if ((u16) (*tcb->hw_consumer_index) != |
2587 | tcb->consumer_index && | |
2588 | !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) { | |
2589 | acked = bnad_free_txbufs(bnad, tcb); | |
be7fa326 RM |
2590 | if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) |
2591 | bna_ib_ack(tcb->i_dbell, acked); | |
8b230ed8 RM |
2592 | smp_mb__before_clear_bit(); |
2593 | clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); | |
2594 | } else { | |
2595 | netif_stop_queue(netdev); | |
2596 | BNAD_UPDATE_CTR(bnad, netif_queue_stop); | |
2597 | } | |
2598 | ||
2599 | smp_mb(); | |
2600 | /* | |
2601 | * Check again to deal with race condition between | |
2602 | * netif_stop_queue here, and netif_wake_queue in | |
2603 | * interrupt handler which is not inside netif tx lock. | |
2604 | */ | |
2605 | if (likely | |
2606 | (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) || | |
2607 | vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) { | |
2608 | BNAD_UPDATE_CTR(bnad, netif_queue_stop); | |
2609 | return NETDEV_TX_BUSY; | |
2610 | } else { | |
2611 | netif_wake_queue(netdev); | |
2612 | BNAD_UPDATE_CTR(bnad, netif_queue_wakeup); | |
2613 | } | |
2614 | } | |
2615 | ||
2616 | unmap_prod = unmap_q->producer_index; | |
8b230ed8 RM |
2617 | flags = 0; |
2618 | ||
2619 | txq_prod = tcb->producer_index; | |
2620 | BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range); | |
8b230ed8 RM |
2621 | txqent->hdr.wi.reserved = 0; |
2622 | txqent->hdr.wi.num_vectors = vectors; | |
8b230ed8 | 2623 | |
eab6d18d | 2624 | if (vlan_tx_tag_present(skb)) { |
8b230ed8 RM |
2625 | vlan_tag = (u16) vlan_tx_tag_get(skb); |
2626 | flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN); | |
2627 | } | |
2628 | if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) { | |
2629 | vlan_tag = | |
2630 | (tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff); | |
2631 | flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN); | |
2632 | } | |
2633 | ||
2634 | txqent->hdr.wi.vlan_tag = htons(vlan_tag); | |
2635 | ||
2636 | if (skb_is_gso(skb)) { | |
271e8b79 RM |
2637 | gso_size = skb_shinfo(skb)->gso_size; |
2638 | ||
2639 | if (unlikely(gso_size > netdev->mtu)) { | |
2640 | dev_kfree_skb(skb); | |
2641 | BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long); | |
2642 | return NETDEV_TX_OK; | |
2643 | } | |
2644 | if (unlikely((gso_size + skb_transport_offset(skb) + | |
2645 | tcp_hdrlen(skb)) >= skb->len)) { | |
2646 | txqent->hdr.wi.opcode = | |
2647 | __constant_htons(BNA_TXQ_WI_SEND); | |
2648 | txqent->hdr.wi.lso_mss = 0; | |
2649 | BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short); | |
2650 | } else { | |
2651 | txqent->hdr.wi.opcode = | |
2652 | __constant_htons(BNA_TXQ_WI_SEND_LSO); | |
2653 | txqent->hdr.wi.lso_mss = htons(gso_size); | |
2654 | } | |
2655 | ||
8b230ed8 | 2656 | err = bnad_tso_prepare(bnad, skb); |
271e8b79 | 2657 | if (unlikely(err)) { |
8b230ed8 | 2658 | dev_kfree_skb(skb); |
271e8b79 | 2659 | BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare); |
8b230ed8 RM |
2660 | return NETDEV_TX_OK; |
2661 | } | |
8b230ed8 RM |
2662 | flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM); |
2663 | txqent->hdr.wi.l4_hdr_size_n_offset = | |
2664 | htons(BNA_TXQ_WI_L4_HDR_N_OFFSET | |
2665 | (tcp_hdrlen(skb) >> 2, | |
2666 | skb_transport_offset(skb))); | |
271e8b79 RM |
2667 | } else { |
2668 | txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND); | |
8b230ed8 RM |
2669 | txqent->hdr.wi.lso_mss = 0; |
2670 | ||
271e8b79 RM |
2671 | if (unlikely(skb->len > (netdev->mtu + ETH_HLEN))) { |
2672 | dev_kfree_skb(skb); | |
2673 | BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long); | |
2674 | return NETDEV_TX_OK; | |
8b230ed8 | 2675 | } |
8b230ed8 | 2676 | |
271e8b79 RM |
2677 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
2678 | u8 proto = 0; | |
8b230ed8 | 2679 | |
271e8b79 RM |
2680 | if (skb->protocol == __constant_htons(ETH_P_IP)) |
2681 | proto = ip_hdr(skb)->protocol; | |
2682 | else if (skb->protocol == | |
2683 | __constant_htons(ETH_P_IPV6)) { | |
2684 | /* nexthdr may not be TCP immediately. */ | |
2685 | proto = ipv6_hdr(skb)->nexthdr; | |
2686 | } | |
2687 | if (proto == IPPROTO_TCP) { | |
2688 | flags |= BNA_TXQ_WI_CF_TCP_CKSUM; | |
2689 | txqent->hdr.wi.l4_hdr_size_n_offset = | |
2690 | htons(BNA_TXQ_WI_L4_HDR_N_OFFSET | |
2691 | (0, skb_transport_offset(skb))); | |
2692 | ||
2693 | BNAD_UPDATE_CTR(bnad, tcpcsum_offload); | |
2694 | ||
2695 | if (unlikely(skb_headlen(skb) < | |
2696 | skb_transport_offset(skb) + tcp_hdrlen(skb))) { | |
2697 | dev_kfree_skb(skb); | |
2698 | BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr); | |
2699 | return NETDEV_TX_OK; | |
2700 | } | |
8b230ed8 | 2701 | |
271e8b79 RM |
2702 | } else if (proto == IPPROTO_UDP) { |
2703 | flags |= BNA_TXQ_WI_CF_UDP_CKSUM; | |
2704 | txqent->hdr.wi.l4_hdr_size_n_offset = | |
2705 | htons(BNA_TXQ_WI_L4_HDR_N_OFFSET | |
2706 | (0, skb_transport_offset(skb))); | |
2707 | ||
2708 | BNAD_UPDATE_CTR(bnad, udpcsum_offload); | |
2709 | if (unlikely(skb_headlen(skb) < | |
2710 | skb_transport_offset(skb) + | |
2711 | sizeof(struct udphdr))) { | |
2712 | dev_kfree_skb(skb); | |
2713 | BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr); | |
2714 | return NETDEV_TX_OK; | |
2715 | } | |
2716 | } else { | |
8b230ed8 | 2717 | dev_kfree_skb(skb); |
271e8b79 | 2718 | BNAD_UPDATE_CTR(bnad, tx_skb_csum_err); |
8b230ed8 RM |
2719 | return NETDEV_TX_OK; |
2720 | } | |
271e8b79 RM |
2721 | } else { |
2722 | txqent->hdr.wi.l4_hdr_size_n_offset = 0; | |
8b230ed8 | 2723 | } |
8b230ed8 RM |
2724 | } |
2725 | ||
2726 | txqent->hdr.wi.flags = htons(flags); | |
2727 | ||
2728 | txqent->hdr.wi.frame_length = htonl(skb->len); | |
2729 | ||
2730 | unmap_q->unmap_array[unmap_prod].skb = skb; | |
271e8b79 RM |
2731 | len = skb_headlen(skb); |
2732 | txqent->vector[0].length = htons(len); | |
5ea74318 IV |
2733 | dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, |
2734 | skb_headlen(skb), DMA_TO_DEVICE); | |
2735 | dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr, | |
8b230ed8 RM |
2736 | dma_addr); |
2737 | ||
271e8b79 | 2738 | BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr); |
8b230ed8 RM |
2739 | BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth); |
2740 | ||
271e8b79 RM |
2741 | vect_id = 0; |
2742 | wis_used = 1; | |
2743 | ||
8b230ed8 | 2744 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
9e903e08 ED |
2745 | const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; |
2746 | u16 size = skb_frag_size(frag); | |
8b230ed8 | 2747 | |
271e8b79 RM |
2748 | if (unlikely(size == 0)) { |
2749 | unmap_prod = unmap_q->producer_index; | |
2750 | ||
2751 | unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev, | |
2752 | unmap_q->unmap_array, | |
2753 | unmap_prod, unmap_q->q_depth, skb, | |
2754 | i); | |
2755 | dev_kfree_skb(skb); | |
2756 | BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero); | |
2757 | return NETDEV_TX_OK; | |
2758 | } | |
2759 | ||
2760 | len += size; | |
2761 | ||
8b230ed8 RM |
2762 | if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) { |
2763 | vect_id = 0; | |
2764 | if (--wi_range) | |
2765 | txqent++; | |
2766 | else { | |
2767 | BNA_QE_INDX_ADD(txq_prod, wis_used, | |
2768 | tcb->q_depth); | |
2769 | wis_used = 0; | |
2770 | BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, | |
2771 | txqent, wi_range); | |
8b230ed8 RM |
2772 | } |
2773 | wis_used++; | |
271e8b79 RM |
2774 | txqent->hdr.wi_ext.opcode = |
2775 | __constant_htons(BNA_TXQ_WI_EXTENSION); | |
8b230ed8 RM |
2776 | } |
2777 | ||
2778 | BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR)); | |
2779 | txqent->vector[vect_id].length = htons(size); | |
4d5b1a67 IC |
2780 | dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag, |
2781 | 0, size, DMA_TO_DEVICE); | |
5ea74318 | 2782 | dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr, |
8b230ed8 RM |
2783 | dma_addr); |
2784 | BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr); | |
2785 | BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth); | |
2786 | } | |
2787 | ||
271e8b79 RM |
2788 | if (unlikely(len != skb->len)) { |
2789 | unmap_prod = unmap_q->producer_index; | |
2790 | ||
2791 | unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev, | |
2792 | unmap_q->unmap_array, unmap_prod, | |
2793 | unmap_q->q_depth, skb, | |
2794 | skb_shinfo(skb)->nr_frags); | |
2795 | dev_kfree_skb(skb); | |
2796 | BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch); | |
2797 | return NETDEV_TX_OK; | |
2798 | } | |
2799 | ||
8b230ed8 RM |
2800 | unmap_q->producer_index = unmap_prod; |
2801 | BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth); | |
2802 | tcb->producer_index = txq_prod; | |
2803 | ||
2804 | smp_mb(); | |
be7fa326 RM |
2805 | |
2806 | if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) | |
2807 | return NETDEV_TX_OK; | |
2808 | ||
8b230ed8 | 2809 | bna_txq_prod_indx_doorbell(tcb); |
271e8b79 | 2810 | smp_mb(); |
8b230ed8 RM |
2811 | |
2812 | if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index) | |
2813 | tasklet_schedule(&bnad->tx_free_tasklet); | |
2814 | ||
2815 | return NETDEV_TX_OK; | |
2816 | } | |
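/*
 * A minimal sketch of the stop/re-check/wake sequence bnad_start_xmit()
 * above uses when the ring looks full: stop the queue, issue a full
 * barrier, then re-read the free count, because the completion path
 * calls netif_wake_queue() outside the tx lock and may have freed
 * descriptors in between. struct my_tx and my_free_slots() are
 * hypothetical.
 */
#include <linux/netdevice.h>

struct my_tx;
static unsigned int my_free_slots(struct my_tx *tx);	/* hypothetical */

static netdev_tx_t my_xmit_ring_full(struct net_device *dev,
				     struct my_tx *tx, unsigned int need)
{
	netif_stop_queue(dev);
	smp_mb();	/* make the stop visible before the re-check */

	if (my_free_slots(tx) < need)
		return NETDEV_TX_BUSY;	/* genuinely full: stay stopped */

	netif_wake_queue(dev);	/* the IRQ freed room meanwhile */
	return NETDEV_TX_OK;	/* caller goes on to queue the skb */
}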
2817 | ||
2818 | /* | |
2819 | * A spin lock is used to synchronize reading of the stats structures, | |
2820 | * which are written by BNA under the same lock. | |
2821 | */ | |
250e061e ED |
2822 | static struct rtnl_link_stats64 * |
2823 | bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) | |
8b230ed8 RM |
2824 | { |
2825 | struct bnad *bnad = netdev_priv(netdev); | |
2826 | unsigned long flags; | |
2827 | ||
2828 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
2829 | ||
250e061e ED |
2830 | bnad_netdev_qstats_fill(bnad, stats); |
2831 | bnad_netdev_hwstats_fill(bnad, stats); | |
8b230ed8 RM |
2832 | |
2833 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
2834 | ||
250e061e | 2835 | return stats; |
8b230ed8 RM |
2836 | } |
2837 | ||
a2122d95 | 2838 | void |
8b230ed8 RM |
2839 | bnad_set_rx_mode(struct net_device *netdev) |
2840 | { | |
2841 | struct bnad *bnad = netdev_priv(netdev); | |
2842 | u32 new_mask, valid_mask; | |
2843 | unsigned long flags; | |
2844 | ||
2845 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
2846 | ||
2847 | new_mask = valid_mask = 0; | |
2848 | ||
2849 | if (netdev->flags & IFF_PROMISC) { | |
2850 | if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) { | |
2851 | new_mask = BNAD_RXMODE_PROMISC_DEFAULT; | |
2852 | valid_mask = BNAD_RXMODE_PROMISC_DEFAULT; | |
2853 | bnad->cfg_flags |= BNAD_CF_PROMISC; | |
2854 | } | |
2855 | } else { | |
2856 | if (bnad->cfg_flags & BNAD_CF_PROMISC) { | |
2857 | new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT; | |
2858 | valid_mask = BNAD_RXMODE_PROMISC_DEFAULT; | |
2859 | bnad->cfg_flags &= ~BNAD_CF_PROMISC; | |
2860 | } | |
2861 | } | |
2862 | ||
2863 | if (netdev->flags & IFF_ALLMULTI) { | |
2864 | if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) { | |
2865 | new_mask |= BNA_RXMODE_ALLMULTI; | |
2866 | valid_mask |= BNA_RXMODE_ALLMULTI; | |
2867 | bnad->cfg_flags |= BNAD_CF_ALLMULTI; | |
2868 | } | |
2869 | } else { | |
2870 | if (bnad->cfg_flags & BNAD_CF_ALLMULTI) { | |
2871 | new_mask &= ~BNA_RXMODE_ALLMULTI; | |
2872 | valid_mask |= BNA_RXMODE_ALLMULTI; | |
2873 | bnad->cfg_flags &= ~BNAD_CF_ALLMULTI; | |
2874 | } | |
2875 | } | |
2876 | ||
271e8b79 RM |
2877 | if (bnad->rx_info[0].rx == NULL) |
2878 | goto unlock; | |
2879 | ||
8b230ed8 RM |
2880 | bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL); |
2881 | ||
2882 | if (!netdev_mc_empty(netdev)) { | |
2883 | u8 *mcaddr_list; | |
2884 | int mc_count = netdev_mc_count(netdev); | |
2885 | ||
2886 | /* Index 0 holds the broadcast address */ | |
2887 | mcaddr_list = | |
2888 | kzalloc((mc_count + 1) * ETH_ALEN, | |
2889 | GFP_ATOMIC); | |
2890 | if (!mcaddr_list) | |
ca1cef3a | 2891 | goto unlock; |
8b230ed8 RM |
2892 | |
2893 | memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN); | |
2894 | ||
2895 | /* Copy rest of the MC addresses */ | |
2896 | bnad_netdev_mc_list_get(netdev, mcaddr_list); | |
2897 | ||
2898 | bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, | |
2899 | mcaddr_list, NULL); | |
2900 | ||
2901 | /* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */ | |
2902 | kfree(mcaddr_list); | |
2903 | } | |
ca1cef3a | 2904 | unlock: |
8b230ed8 RM |
2905 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
2906 | } | |
2907 | ||
2908 | /* | |
2909 | * bna_lock is used to sync writes to netdev->addr | |
2910 | * conf_lock cannot be used since this call may be made | |
2911 | * in a non-blocking context. | |
2912 | */ | |
2913 | static int | |
2914 | bnad_set_mac_address(struct net_device *netdev, void *mac_addr) | |
2915 | { | |
2916 | int err; | |
2917 | struct bnad *bnad = netdev_priv(netdev); | |
2918 | struct sockaddr *sa = (struct sockaddr *)mac_addr; | |
2919 | unsigned long flags; | |
2920 | ||
2921 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
2922 | ||
2923 | err = bnad_mac_addr_set_locked(bnad, sa->sa_data); | |
2924 | ||
2925 | if (!err) | |
2926 | memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len); | |
2927 | ||
2928 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
2929 | ||
2930 | return err; | |
2931 | } | |
2932 | ||
2933 | static int | |
078086f3 | 2934 | bnad_mtu_set(struct bnad *bnad, int mtu) |
8b230ed8 | 2935 | { |
8b230ed8 RM |
2936 | unsigned long flags; |
2937 | ||
078086f3 RM |
2938 | init_completion(&bnad->bnad_completions.mtu_comp); |
2939 | ||
2940 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
2941 | bna_enet_mtu_set(&bnad->bna.enet, mtu, bnad_cb_enet_mtu_set); | |
2942 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
2943 | ||
2944 | wait_for_completion(&bnad->bnad_completions.mtu_comp); | |
2945 | ||
2946 | return bnad->bnad_completions.mtu_comp_status; | |
2947 | } | |
2948 | ||
2949 | static int | |
2950 | bnad_change_mtu(struct net_device *netdev, int new_mtu) | |
2951 | { | |
2952 | int err, mtu = netdev->mtu; | |
8b230ed8 RM |
2953 | struct bnad *bnad = netdev_priv(netdev); |
2954 | ||
2955 | if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU) | |
2956 | return -EINVAL; | |
2957 | ||
2958 | mutex_lock(&bnad->conf_mutex); | |
2959 | ||
2960 | netdev->mtu = new_mtu; | |
2961 | ||
078086f3 RM |
2962 | mtu = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN; |
2963 | err = bnad_mtu_set(bnad, mtu); | |
2964 | if (err) | |
2965 | err = -EBUSY; | |
8b230ed8 RM |
2966 | |
2967 | mutex_unlock(&bnad->conf_mutex); | |
2968 | return err; | |
2969 | } | |
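/*
 * The frame-size arithmetic used above, worked for the default MTU as a
 * quick check: ETH_HLEN (14) + VLAN_HLEN (4) + 1500 + ETH_FCS_LEN (4)
 * gives 1522 bytes on the wire for a maximum-size VLAN-tagged frame.
 * my_wire_frame_size() is a hypothetical helper.
 */
#include <linux/if_ether.h>
#include <linux/if_vlan.h>

static inline int my_wire_frame_size(int mtu)
{
	return ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN;  /* 1500 -> 1522 */
}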
2970 | ||
8b230ed8 RM |
2971 | static void |
2972 | bnad_vlan_rx_add_vid(struct net_device *netdev, | |
2973 | unsigned short vid) | |
2974 | { | |
2975 | struct bnad *bnad = netdev_priv(netdev); | |
2976 | unsigned long flags; | |
2977 | ||
2978 | if (!bnad->rx_info[0].rx) | |
2979 | return; | |
2980 | ||
2981 | mutex_lock(&bnad->conf_mutex); | |
2982 | ||
2983 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
2984 | bna_rx_vlan_add(bnad->rx_info[0].rx, vid); | |
f859d7cb | 2985 | set_bit(vid, bnad->active_vlans); |
8b230ed8 RM |
2986 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
2987 | ||
2988 | mutex_unlock(&bnad->conf_mutex); | |
2989 | } | |
2990 | ||
2991 | static void | |
2992 | bnad_vlan_rx_kill_vid(struct net_device *netdev, | |
2993 | unsigned short vid) | |
2994 | { | |
2995 | struct bnad *bnad = netdev_priv(netdev); | |
2996 | unsigned long flags; | |
2997 | ||
2998 | if (!bnad->rx_info[0].rx) | |
2999 | return; | |
3000 | ||
3001 | mutex_lock(&bnad->conf_mutex); | |
3002 | ||
3003 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
f859d7cb | 3004 | clear_bit(vid, bnad->active_vlans); |
8b230ed8 RM |
3005 | bna_rx_vlan_del(bnad->rx_info[0].rx, vid); |
3006 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
3007 | ||
3008 | mutex_unlock(&bnad->conf_mutex); | |
3009 | } | |
3010 | ||
3011 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
3012 | static void | |
3013 | bnad_netpoll(struct net_device *netdev) | |
3014 | { | |
3015 | struct bnad *bnad = netdev_priv(netdev); | |
3016 | struct bnad_rx_info *rx_info; | |
3017 | struct bnad_rx_ctrl *rx_ctrl; | |
3018 | u32 curr_mask; | |
3019 | int i, j; | |
3020 | ||
3021 | if (!(bnad->cfg_flags & BNAD_CF_MSIX)) { | |
3022 | bna_intx_disable(&bnad->bna, curr_mask); | |
3023 | bnad_isr(bnad->pcidev->irq, netdev); | |
3024 | bna_intx_enable(&bnad->bna, curr_mask); | |
3025 | } else { | |
19dbff9f RM |
3026 | /* |
3027 | * Tx processing may happen in sending context, so no need | |
3028 | * to explicitly process completions here | |
3029 | */ | |
3030 | ||
3031 | /* Rx processing */ | |
8b230ed8 RM |
3032 | for (i = 0; i < bnad->num_rx; i++) { |
3033 | rx_info = &bnad->rx_info[i]; | |
3034 | if (!rx_info->rx) | |
3035 | continue; | |
3036 | for (j = 0; j < bnad->num_rxp_per_rx; j++) { | |
3037 | rx_ctrl = &rx_info->rx_ctrl[j]; | |
271e8b79 | 3038 | if (rx_ctrl->ccb) |
8b230ed8 RM |
3039 | bnad_netif_rx_schedule_poll(bnad, |
3040 | rx_ctrl->ccb); | |
8b230ed8 RM |
3041 | } |
3042 | } | |
3043 | } | |
3044 | } | |
3045 | #endif | |
3046 | ||
3047 | static const struct net_device_ops bnad_netdev_ops = { | |
3048 | .ndo_open = bnad_open, | |
3049 | .ndo_stop = bnad_stop, | |
3050 | .ndo_start_xmit = bnad_start_xmit, | |
250e061e | 3051 | .ndo_get_stats64 = bnad_get_stats64, |
8b230ed8 | 3052 | .ndo_set_rx_mode = bnad_set_rx_mode, |
8b230ed8 RM |
3053 | .ndo_validate_addr = eth_validate_addr, |
3054 | .ndo_set_mac_address = bnad_set_mac_address, | |
3055 | .ndo_change_mtu = bnad_change_mtu, | |
8b230ed8 RM |
3056 | .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid, |
3057 | .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid, | |
3058 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
3059 | .ndo_poll_controller = bnad_netpoll | |
3060 | #endif | |
3061 | }; | |
3062 | ||
3063 | static void | |
3064 | bnad_netdev_init(struct bnad *bnad, bool using_dac) | |
3065 | { | |
3066 | struct net_device *netdev = bnad->netdev; | |
3067 | ||
e5ee20e7 MM |
3068 | netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | |
3069 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | | |
3070 | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX; | |
8b230ed8 | 3071 | |
e5ee20e7 MM |
3072 | netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA | |
3073 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | | |
3074 | NETIF_F_TSO | NETIF_F_TSO6; | |
8b230ed8 | 3075 | |
e5ee20e7 MM |
3076 | netdev->features |= netdev->hw_features | |
3077 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; | |
8b230ed8 RM |
3078 | |
3079 | if (using_dac) | |
3080 | netdev->features |= NETIF_F_HIGHDMA; | |
3081 | ||
8b230ed8 RM |
3082 | netdev->mem_start = bnad->mmio_start; |
3083 | netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1; | |
3084 | ||
3085 | netdev->netdev_ops = &bnad_netdev_ops; | |
3086 | bnad_set_ethtool_ops(netdev); | |
3087 | } | |
3088 | ||
3089 | /* | |
3090 | * 1. Initialize the bnad structure | |
3091 | * 2. Setup netdev pointer in pci_dev | |
3092 | * 3. Initialize Tx free tasklet | |
3093 | * 4. Initialize no. of TxQ & CQs & MSIX vectors | |
3094 | */ | |
3095 | static int | |
3096 | bnad_init(struct bnad *bnad, | |
3097 | struct pci_dev *pdev, struct net_device *netdev) | |
3098 | { | |
3099 | unsigned long flags; | |
3100 | ||
3101 | SET_NETDEV_DEV(netdev, &pdev->dev); | |
3102 | pci_set_drvdata(pdev, netdev); | |
3103 | ||
3104 | bnad->netdev = netdev; | |
3105 | bnad->pcidev = pdev; | |
3106 | bnad->mmio_start = pci_resource_start(pdev, 0); | |
3107 | bnad->mmio_len = pci_resource_len(pdev, 0); | |
3108 | bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len); | |
3109 | if (!bnad->bar0) { | |
3110 | dev_err(&pdev->dev, "ioremap for bar0 failed\n"); | |
3111 | pci_set_drvdata(pdev, NULL); | |
3112 | return -ENOMEM; | |
3113 | } | |
3114 | pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0, | |
3115 | (unsigned long long) bnad->mmio_len); | |
3116 | ||
3117 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
3118 | if (!bnad_msix_disable) | |
3119 | bnad->cfg_flags = BNAD_CF_MSIX; | |
3120 | ||
3121 | bnad->cfg_flags |= BNAD_CF_DIM_ENABLED; | |
3122 | ||
3123 | bnad_q_num_init(bnad); | |
3124 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
3125 | ||
3126 | bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) + | |
3127 | (bnad->num_rx * bnad->num_rxp_per_rx) + | |
3128 | BNAD_MAILBOX_MSIX_VECTORS; | |
8b230ed8 RM |
3129 | |
3130 | bnad->txq_depth = BNAD_TXQ_DEPTH; | |
3131 | bnad->rxq_depth = BNAD_RXQ_DEPTH; | |
8b230ed8 RM |
3132 | |
3133 | bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO; | |
3134 | bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO; | |
3135 | ||
3136 | tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet, | |
3137 | (unsigned long)bnad); | |
3138 | ||
3139 | return 0; | |
3140 | } | |
3141 | ||
3142 | /* | |
3143 | * Must be called after bnad_pci_uninit() | |
3144 | * so that iounmap() and pci_set_drvdata(NULL) | |
3145 | * happens only after PCI uninitialization. | |
3146 | */ | |
3147 | static void | |
3148 | bnad_uninit(struct bnad *bnad) | |
3149 | { | |
3150 | if (bnad->bar0) | |
3151 | iounmap(bnad->bar0); | |
3152 | pci_set_drvdata(bnad->pcidev, NULL); | |
3153 | } | |
3154 | ||
3155 | /* | |
3156 | * Initialize locks | |
078086f3 | 3157 | a) Per ioceth mutes used for serializing configuration |
8b230ed8 RM |
3158 | changes from OS interface |
3159 | b) spin lock used to protect bna state machine | |
3160 | */ | |
3161 | static void | |
3162 | bnad_lock_init(struct bnad *bnad) | |
3163 | { | |
3164 | spin_lock_init(&bnad->bna_lock); | |
3165 | mutex_init(&bnad->conf_mutex); | |
3166 | } | |
3167 | ||
3168 | static void | |
3169 | bnad_lock_uninit(struct bnad *bnad) | |
3170 | { | |
3171 | mutex_destroy(&bnad->conf_mutex); | |
3172 | } | |

/* PCI Initialization */
static int
bnad_pci_init(struct bnad *bnad,
	      struct pci_dev *pdev, bool *using_dac)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;
	err = pci_request_regions(pdev, BNAD_NAME);
	if (err)
		goto disable_device;
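	/* Prefer a 64-bit DMA mask; if the platform rejects it, fall
	 * back to 32 bits before giving up.
	 */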
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		*using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err)
				goto release_regions;
		}
		*using_dac = 0;
	}
	pci_set_master(pdev);
	return 0;

release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);

	return err;
}

static void
bnad_pci_uninit(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static int __devinit
bnad_pci_probe(struct pci_dev *pdev,
	       const struct pci_device_id *pcidev_id)
{
	bool using_dac;
	int err;
	struct bnad *bnad;
	struct bna *bna;
	struct net_device *netdev;
	struct bfa_pcidev pcidev_info;
	unsigned long flags;

	pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
		pdev, pcidev_id, PCI_FUNC(pdev->devfn));

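	/* Firmware image loading is serialized under bnad_fwimg_mutex, so
	 * only one probe at a time pulls in the image.
	 */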
	mutex_lock(&bnad_fwimg_mutex);
	if (!cna_get_firmware_buf(pdev)) {
		mutex_unlock(&bnad_fwimg_mutex);
		pr_warn("Failed to load Firmware Image!\n");
		return -ENODEV;
	}
	mutex_unlock(&bnad_fwimg_mutex);

	/*
	 * Allocates sizeof(struct net_device) + sizeof(struct bnad);
	 * bnad = netdev->priv
	 */
	netdev = alloc_etherdev(sizeof(struct bnad));
	if (!netdev) {
		dev_err(&pdev->dev, "netdev allocation failed\n");
		err = -ENOMEM;
		return err;
	}
	bnad = netdev_priv(netdev);

	bnad_lock_init(bnad);

	mutex_lock(&bnad->conf_mutex);
	/*
	 * PCI initialization
	 *	Output : using_dac = 1 for 64 bit DMA
	 *			   = 0 for 32 bit DMA
	 */
	err = bnad_pci_init(bnad, pdev, &using_dac);
	if (err)
		goto unlock_mutex;

	/*
	 * Initialize bnad structure
	 * Setup relation between pci_dev & netdev
	 * Init Tx free tasklet
	 */
	err = bnad_init(bnad, pdev, netdev);
	if (err)
		goto pci_uninit;

	/* Initialize netdev structure, set up ethtool ops */
	bnad_netdev_init(bnad, using_dac);

	/* Set link to down state */
	netif_carrier_off(netdev);

	/* Get resource requirement from bna */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_res_req(&bnad->res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Allocate resources from bna */
	err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
	if (err)
		goto drv_uninit;

	bna = &bnad->bna;

	/* Setup pcidev_info for bna_init() */
	pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
	pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
	pcidev_info.device_id = bnad->pcidev->device;
	pcidev_info.pci_bar_kva = bnad->bar0;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad->stats.bna_stats = &bna->stats;

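	/* Bring up MSI-X (when enabled) and claim the mailbox IRQ before
	 * talking to the IOC.
	 */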
	bnad_enable_msix(bnad);
	err = bnad_mbox_irq_alloc(bnad);
	if (err)
		goto res_free;

	/* Set up timers */
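	/* Going by their handlers: ioc_timer, hb_timer, iocpf_timer and
	 * sem_timer drive the IOC timeout, the heartbeat check, the
	 * IOCPF timeout and the hw-semaphore retry, respectively.
	 */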
	setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
		    ((unsigned long)bnad));
	setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
		    ((unsigned long)bnad));
	setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
		    ((unsigned long)bnad));
	setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
		    ((unsigned long)bnad));

	/* Now start the timer before calling IOC */
	mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
		  jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));

	/*
	 * Start the chip
	 * If the callback comes with error, we bail out.
	 * This is a catastrophic error.
	 */
	err = bnad_ioceth_enable(bnad);
	if (err) {
		pr_err("BNA: Initialization failed err=%d\n",
		       err);
		goto probe_success;
	}

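	/* Ask for BNAD_NUM_TXQ + 1 TxQs and BNAD_NUM_RXP + 1 Rx paths;
	 * if the IOC cannot grant that, shrink to its advertised limits
	 * and retry once before failing.
	 */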
	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
	    bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
		bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
				  bna_attr(bna)->num_rxp - 1);
		if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
		    bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
			err = -EIO;
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (err)
		goto disable_ioceth;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
	if (err) {
		err = -EIO;
		goto disable_ioceth;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Get the burnt-in mac */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr);
	bnad_set_netdev_perm_addr(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);

	/* Finally, register with the net_device layer */
	err = register_netdev(netdev);
	if (err) {
		pr_err("BNA : Registering with netdev failed\n");
		goto probe_uninit;
	}
	set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);

	return 0;

probe_success:
	mutex_unlock(&bnad->conf_mutex);
	return 0;

probe_uninit:
	mutex_lock(&bnad->conf_mutex);
	bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
disable_ioceth:
	bnad_ioceth_disable(bnad);
	del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_uninit(bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	bnad_mbox_irq_free(bnad);
	bnad_disable_msix(bnad);
res_free:
	bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
drv_uninit:
	bnad_uninit(bnad);
pci_uninit:
	bnad_pci_uninit(pdev);
unlock_mutex:
	mutex_unlock(&bnad->conf_mutex);
	bnad_lock_uninit(bnad);
	free_netdev(netdev);
	return err;
}

static void __devexit
bnad_pci_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnad *bnad;
	struct bna *bna;
	unsigned long flags;

	if (!netdev)
		return;

	pr_info("%s bnad_pci_remove\n", netdev->name);
	bnad = netdev_priv(netdev);
	bna = &bnad->bna;

	if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
		unregister_netdev(netdev);

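	/* Tear down everything that bnad_pci_probe() set up. */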
	mutex_lock(&bnad->conf_mutex);
	bnad_ioceth_disable(bnad);
	del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_uninit(bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
	bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
	bnad_mbox_irq_free(bnad);
	bnad_disable_msix(bnad);
	bnad_pci_uninit(pdev);
	mutex_unlock(&bnad->conf_mutex);
	bnad_lock_uninit(bnad);
	bnad_uninit(bnad);
	free_netdev(netdev);
}

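/* Match the Brocade CT and CT2 10G Ethernet ASICs, by network class. */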
static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
			PCI_DEVICE_ID_BROCADE_CT),
		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
		.class_mask = 0xffff00
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
			BFA_PCI_DEVICE_ID_CT2),
		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
		.class_mask = 0xffff00
	},
	{0, },
};

MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);

static struct pci_driver bnad_pci_driver = {
	.name = BNAD_NAME,
	.id_table = bnad_pci_id_table,
	.probe = bnad_pci_probe,
	.remove = __devexit_p(bnad_pci_remove),
};

static int __init
bnad_module_init(void)
{
	int err;

	pr_info("Brocade 10G Ethernet driver - version: %s\n",
		BNAD_VERSION);

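	/* Hand the auto-recovery module parameter down to the IOC layer. */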
	bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);

	err = pci_register_driver(&bnad_pci_driver);
	if (err < 0) {
		pr_err("bna : PCI registration failed in module init "
		       "(%d)\n", err);
		return err;
	}

	return 0;
}

static void __exit
bnad_module_exit(void)
{
	pci_unregister_driver(&bnad_pci_driver);

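	/* Drop the cached firmware image, if one was loaded. */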
	if (bfi_fw)
		release_firmware(bfi_fw);
}

module_init(bnad_module_init);
module_exit(bnad_module_exit);

MODULE_AUTHOR("Brocade");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
MODULE_VERSION(BNAD_VERSION);
MODULE_FIRMWARE(CNA_FW_FILE_CT);
MODULE_FIRMWARE(CNA_FW_FILE_CT2);