1/* bnx2x_cmn.c: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
18
19#include <linux/etherdevice.h>
20#include <linux/ip.h>
21#include <net/ipv6.h>
22#include <net/ip6_checksum.h>
23#include <linux/firmware.h>
24#include "bnx2x_cmn.h"
25
26#ifdef BCM_VLAN
27#include <linux/if_vlan.h>
28#endif
29
30#include "bnx2x_init.h"
31
32
33/* free skb in the packet ring at pos idx
34 * return idx of last bd freed
35 */
36static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
37 u16 idx)
38{
39 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
40 struct eth_tx_start_bd *tx_start_bd;
41 struct eth_tx_bd *tx_data_bd;
42 struct sk_buff *skb = tx_buf->skb;
43 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
44 int nbd;
45
46 /* prefetch skb end pointer to speedup dev_kfree_skb() */
47 prefetch(&skb->end);
48
49 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
50 idx, tx_buf, skb);
51
52 /* unmap first bd */
53 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
54 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
55 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
56 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
57
58 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
59#ifdef BNX2X_STOP_ON_ERROR
60 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
61 BNX2X_ERR("BAD nbd!\n");
62 bnx2x_panic();
63 }
64#endif
65 new_cons = nbd + tx_buf->first_bd;
66
67 /* Get the next bd */
68 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
69
70 /* Skip a parse bd... */
71 --nbd;
72 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
73
74 /* ...and the TSO split header bd since they have no mapping */
75 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
76 --nbd;
77 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
78 }
79
80 /* now free frags */
81 while (nbd > 0) {
82
83 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
84 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
85 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
86 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
87 if (--nbd)
88 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
89 }
90
91 /* release skb */
92 WARN_ON(!skb);
93 dev_kfree_skb(skb);
94 tx_buf->first_bd = 0;
95 tx_buf->skb = NULL;
96
97 return new_cons;
98}
99
100int bnx2x_tx_int(struct bnx2x_fastpath *fp)
101{
102 struct bnx2x *bp = fp->bp;
103 struct netdev_queue *txq;
104 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
105
106#ifdef BNX2X_STOP_ON_ERROR
107 if (unlikely(bp->panic))
108 return -1;
109#endif
110
111 txq = netdev_get_tx_queue(bp->dev, fp->index);
112 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
113 sw_cons = fp->tx_pkt_cons;
114
115 while (sw_cons != hw_cons) {
116 u16 pkt_cons;
117
118 pkt_cons = TX_BD(sw_cons);
119
120 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
121 " pkt_cons %u\n",
122 fp->index, hw_cons, sw_cons, pkt_cons);
123
124 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
125 sw_cons++;
126 }
127
128 fp->tx_pkt_cons = sw_cons;
129 fp->tx_bd_cons = bd_cons;
130
131 /* Need to make the tx_bd_cons update visible to start_xmit()
132 * before checking for netif_tx_queue_stopped(). Without the
133 * memory barrier, there is a small possibility that
134 * start_xmit() will miss it and cause the queue to be stopped
135 * forever.
136 */
137 smp_mb();
138
139 /* TBD need a thresh? */
140 if (unlikely(netif_tx_queue_stopped(txq))) {
141 /* Taking tx_lock() is needed to prevent reenabling the queue
142 * while it's empty. This could have happen if rx_action() gets
143 * suspended in bnx2x_tx_int() after the condition before
144 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
145 *
146 * stops the queue->sees fresh tx_bd_cons->releases the queue->
147 * sends some packets consuming the whole queue again->
148 * stops the queue
149 */
150
151 __netif_tx_lock(txq, smp_processor_id());
152
153 if ((netif_tx_queue_stopped(txq)) &&
154 (bp->state == BNX2X_STATE_OPEN) &&
155 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
156 netif_tx_wake_queue(txq);
157
158 __netif_tx_unlock(txq);
159 }
160 return 0;
161}
162
163static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
164 u16 idx)
165{
166 u16 last_max = fp->last_max_sge;
167
168 if (SUB_S16(idx, last_max) > 0)
169 fp->last_max_sge = idx;
170}
171
172static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
173 struct eth_fast_path_rx_cqe *fp_cqe)
174{
175 struct bnx2x *bp = fp->bp;
176 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
177 le16_to_cpu(fp_cqe->len_on_bd)) >>
178 SGE_PAGE_SHIFT;
179 u16 last_max, last_elem, first_elem;
180 u16 delta = 0;
181 u16 i;
182
183 if (!sge_len)
184 return;
185
186 /* First mark all used pages */
187 for (i = 0; i < sge_len; i++)
188 SGE_MASK_CLEAR_BIT(fp,
189 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
190
191 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
192 sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
193
194 /* Here we assume that the last SGE index is the biggest */
195 prefetch((void *)(fp->sge_mask));
196 bnx2x_update_last_max_sge(fp,
197 le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
198
199 last_max = RX_SGE(fp->last_max_sge);
200 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
201 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
202
203 /* If ring is not full */
204 if (last_elem + 1 != first_elem)
205 last_elem++;
206
207 /* Now update the prod */
208 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
209 if (likely(fp->sge_mask[i]))
210 break;
211
212 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
213 delta += RX_SGE_MASK_ELEM_SZ;
214 }
215
216 if (delta > 0) {
217 fp->rx_sge_prod += delta;
218 /* clear page-end entries */
219 bnx2x_clear_sge_mask_next_elems(fp);
220 }
221
222 DP(NETIF_MSG_RX_STATUS,
223 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
224 fp->last_max_sge, fp->rx_sge_prod);
225}
226
227static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
228 struct sk_buff *skb, u16 cons, u16 prod)
229{
230 struct bnx2x *bp = fp->bp;
231 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
232 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
233 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
234 dma_addr_t mapping;
235
236 /* move empty skb from pool to prod and map it */
237 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
238 mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
239 bp->rx_buf_size, DMA_FROM_DEVICE);
240 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
241
242 /* move partial skb from cons to pool (don't unmap yet) */
243 fp->tpa_pool[queue] = *cons_rx_buf;
244
245 /* mark bin state as start - print error if current state != stop */
246 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
247 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
248
249 fp->tpa_state[queue] = BNX2X_TPA_START;
250
251 /* point prod_bd to new skb */
252 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
253 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
254
255#ifdef BNX2X_STOP_ON_ERROR
256 fp->tpa_queue_used |= (1 << queue);
257#ifdef _ASM_GENERIC_INT_L64_H
258 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
259#else
260 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
261#endif
262 fp->tpa_queue_used);
263#endif
264}
265
266static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
267 struct sk_buff *skb,
268 struct eth_fast_path_rx_cqe *fp_cqe,
269 u16 cqe_idx)
270{
271 struct sw_rx_page *rx_pg, old_rx_pg;
272 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
273 u32 i, frag_len, frag_size, pages;
274 int err;
275 int j;
276
277 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
278 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
279
280 /* This is needed in order to enable forwarding support */
281 if (frag_size)
282 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
283 max(frag_size, (u32)len_on_bd));
284
285#ifdef BNX2X_STOP_ON_ERROR
286 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
287 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
288 pages, cqe_idx);
289 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
290 fp_cqe->pkt_len, len_on_bd);
291 bnx2x_panic();
292 return -EINVAL;
293 }
294#endif
295
296 /* Run through the SGL and compose the fragmented skb */
297 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
298 u16 sge_idx =
299 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j]));
300
301 /* FW gives the indices of the SGE as if the ring is an array
302 (meaning that "next" element will consume 2 indices) */
303 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
304 rx_pg = &fp->rx_page_ring[sge_idx];
305 old_rx_pg = *rx_pg;
306
307 /* If we fail to allocate a substitute page, we simply stop
308 where we are and drop the whole packet */
309 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
310 if (unlikely(err)) {
311 fp->eth_q_stats.rx_skb_alloc_failed++;
312 return err;
313 }
314
 315 /* Unmap the page as we are going to pass it to the stack */
316 dma_unmap_page(&bp->pdev->dev,
317 dma_unmap_addr(&old_rx_pg, mapping),
318 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
319
320 /* Add one frag and update the appropriate fields in the skb */
321 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
322
323 skb->data_len += frag_len;
324 skb->truesize += frag_len;
325 skb->len += frag_len;
326
327 frag_size -= frag_len;
328 }
329
330 return 0;
331}
332
333static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
334 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
335 u16 cqe_idx)
336{
337 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
338 struct sk_buff *skb = rx_buf->skb;
339 /* alloc new skb */
340 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
341
342 /* Unmap skb in the pool anyway, as we are going to change
343 pool entry status to BNX2X_TPA_STOP even if new skb allocation
344 fails. */
345 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
346 bp->rx_buf_size, DMA_FROM_DEVICE);
347
348 if (likely(new_skb)) {
349 /* fix ip xsum and give it to the stack */
350 /* (no need to map the new skb) */
351#ifdef BCM_VLAN
352 int is_vlan_cqe =
353 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
354 PARSING_FLAGS_VLAN);
355 int is_not_hwaccel_vlan_cqe =
356 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
357#endif
358
359 prefetch(skb);
360 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
361
362#ifdef BNX2X_STOP_ON_ERROR
363 if (pad + len > bp->rx_buf_size) {
364 BNX2X_ERR("skb_put is about to fail... "
365 "pad %d len %d rx_buf_size %d\n",
366 pad, len, bp->rx_buf_size);
367 bnx2x_panic();
368 return;
369 }
370#endif
371
372 skb_reserve(skb, pad);
373 skb_put(skb, len);
374
375 skb->protocol = eth_type_trans(skb, bp->dev);
376 skb->ip_summed = CHECKSUM_UNNECESSARY;
377
378 {
379 struct iphdr *iph;
380
381 iph = (struct iphdr *)skb->data;
382#ifdef BCM_VLAN
383 /* If there is no Rx VLAN offloading -
384 take VLAN tag into an account */
385 if (unlikely(is_not_hwaccel_vlan_cqe))
386 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
387#endif
388 iph->check = 0;
389 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
390 }
391
392 if (!bnx2x_fill_frag_skb(bp, fp, skb,
393 &cqe->fast_path_cqe, cqe_idx)) {
394#ifdef BCM_VLAN
395 if ((bp->vlgrp != NULL) &&
396 (le16_to_cpu(cqe->fast_path_cqe.
397 pars_flags.flags) & PARSING_FLAGS_VLAN))
398 vlan_gro_receive(&fp->napi, bp->vlgrp,
399 le16_to_cpu(cqe->fast_path_cqe.
400 vlan_tag), skb);
401 else
402#endif
403 napi_gro_receive(&fp->napi, skb);
404 } else {
405 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
406 " - dropping packet!\n");
407 dev_kfree_skb(skb);
408 }
409
410
411 /* put new skb in bin */
412 fp->tpa_pool[queue].skb = new_skb;
413
414 } else {
415 /* else drop the packet and keep the buffer in the bin */
416 DP(NETIF_MSG_RX_STATUS,
417 "Failed to allocate new skb - dropping packet!\n");
418 fp->eth_q_stats.rx_skb_alloc_failed++;
419 }
420
421 fp->tpa_state[queue] = BNX2X_TPA_STOP;
422}
423
424/* Set Toeplitz hash value in the skb using the value from the
425 * CQE (calculated by HW).
426 */
427static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
428 struct sk_buff *skb)
429{
430 /* Set Toeplitz hash from CQE */
431 if ((bp->dev->features & NETIF_F_RXHASH) &&
432 (cqe->fast_path_cqe.status_flags &
433 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
434 skb->rxhash =
435 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
436}
437
438int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
439{
440 struct bnx2x *bp = fp->bp;
441 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
442 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
443 int rx_pkt = 0;
444
445#ifdef BNX2X_STOP_ON_ERROR
446 if (unlikely(bp->panic))
447 return 0;
448#endif
449
450 /* CQ "next element" is of the size of the regular element,
451 that's why it's ok here */
452 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
453 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
454 hw_comp_cons++;
455
456 bd_cons = fp->rx_bd_cons;
457 bd_prod = fp->rx_bd_prod;
458 bd_prod_fw = bd_prod;
459 sw_comp_cons = fp->rx_comp_cons;
460 sw_comp_prod = fp->rx_comp_prod;
461
462 /* Memory barrier necessary as speculative reads of the rx
463 * buffer can be ahead of the index in the status block
464 */
465 rmb();
466
467 DP(NETIF_MSG_RX_STATUS,
468 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
469 fp->index, hw_comp_cons, sw_comp_cons);
470
471 while (sw_comp_cons != hw_comp_cons) {
472 struct sw_rx_bd *rx_buf = NULL;
473 struct sk_buff *skb;
474 union eth_rx_cqe *cqe;
475 u8 cqe_fp_flags;
476 u16 len, pad;
477
478 comp_ring_cons = RCQ_BD(sw_comp_cons);
479 bd_prod = RX_BD(bd_prod);
480 bd_cons = RX_BD(bd_cons);
481
482 /* Prefetch the page containing the BD descriptor
483 at producer's index. It will be needed when new skb is
484 allocated */
485 prefetch((void *)(PAGE_ALIGN((unsigned long)
486 (&fp->rx_desc_ring[bd_prod])) -
487 PAGE_SIZE + 1));
488
489 cqe = &fp->rx_comp_ring[comp_ring_cons];
490 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
491
492 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
493 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
494 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
495 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
496 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
497 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
498
499 /* is this a slowpath msg? */
500 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
501 bnx2x_sp_event(fp, cqe);
502 goto next_cqe;
503
504 /* this is an rx packet */
505 } else {
506 rx_buf = &fp->rx_buf_ring[bd_cons];
507 skb = rx_buf->skb;
508 prefetch(skb);
509 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
510 pad = cqe->fast_path_cqe.placement_offset;
511
512 /* If CQE is marked both TPA_START and TPA_END
513 it is a non-TPA CQE */
514 if ((!fp->disable_tpa) &&
515 (TPA_TYPE(cqe_fp_flags) !=
516 (TPA_TYPE_START | TPA_TYPE_END))) {
517 u16 queue = cqe->fast_path_cqe.queue_index;
518
519 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
520 DP(NETIF_MSG_RX_STATUS,
521 "calling tpa_start on queue %d\n",
522 queue);
523
524 bnx2x_tpa_start(fp, queue, skb,
525 bd_cons, bd_prod);
526
527 /* Set Toeplitz hash for an LRO skb */
528 bnx2x_set_skb_rxhash(bp, cqe, skb);
529
530 goto next_rx;
531 }
532
533 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
534 DP(NETIF_MSG_RX_STATUS,
535 "calling tpa_stop on queue %d\n",
536 queue);
537
538 if (!BNX2X_RX_SUM_FIX(cqe))
539 BNX2X_ERR("STOP on none TCP "
540 "data\n");
541
542 /* This is a size of the linear data
543 on this skb */
544 len = le16_to_cpu(cqe->fast_path_cqe.
545 len_on_bd);
546 bnx2x_tpa_stop(bp, fp, queue, pad,
547 len, cqe, comp_ring_cons);
548#ifdef BNX2X_STOP_ON_ERROR
549 if (bp->panic)
550 return 0;
551#endif
552
553 bnx2x_update_sge_prod(fp,
554 &cqe->fast_path_cqe);
555 goto next_cqe;
556 }
557 }
558
559 dma_sync_single_for_device(&bp->pdev->dev,
560 dma_unmap_addr(rx_buf, mapping),
561 pad + RX_COPY_THRESH,
562 DMA_FROM_DEVICE);
563 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
564
565 /* is this an error packet? */
566 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
567 DP(NETIF_MSG_RX_ERR,
568 "ERROR flags %x rx packet %u\n",
569 cqe_fp_flags, sw_comp_cons);
570 fp->eth_q_stats.rx_err_discard_pkt++;
571 goto reuse_rx;
572 }
573
574 /* Since we don't have a jumbo ring
575 * copy small packets if mtu > 1500
576 */
577 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
578 (len <= RX_COPY_THRESH)) {
579 struct sk_buff *new_skb;
580
581 new_skb = netdev_alloc_skb(bp->dev,
582 len + pad);
583 if (new_skb == NULL) {
584 DP(NETIF_MSG_RX_ERR,
585 "ERROR packet dropped "
586 "because of alloc failure\n");
587 fp->eth_q_stats.rx_skb_alloc_failed++;
588 goto reuse_rx;
589 }
590
591 /* aligned copy */
592 skb_copy_from_linear_data_offset(skb, pad,
593 new_skb->data + pad, len);
594 skb_reserve(new_skb, pad);
595 skb_put(new_skb, len);
596
597 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
598
599 skb = new_skb;
600
601 } else
602 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
603 dma_unmap_single(&bp->pdev->dev,
604 dma_unmap_addr(rx_buf, mapping),
605 bp->rx_buf_size,
606 DMA_FROM_DEVICE);
607 skb_reserve(skb, pad);
608 skb_put(skb, len);
609
610 } else {
611 DP(NETIF_MSG_RX_ERR,
612 "ERROR packet dropped because "
613 "of alloc failure\n");
614 fp->eth_q_stats.rx_skb_alloc_failed++;
615reuse_rx:
616 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
617 goto next_rx;
618 }
619
620 skb->protocol = eth_type_trans(skb, bp->dev);
621
 622 /* Set Toeplitz hash for a non-LRO skb */
623 bnx2x_set_skb_rxhash(bp, cqe, skb);
624
625 skb_checksum_none_assert(skb);
626 if (bp->rx_csum) {
627 if (likely(BNX2X_RX_CSUM_OK(cqe)))
628 skb->ip_summed = CHECKSUM_UNNECESSARY;
629 else
630 fp->eth_q_stats.hw_csum_err++;
631 }
632 }
633
634 skb_record_rx_queue(skb, fp->index);
635
636#ifdef BCM_VLAN
637 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
638 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
639 PARSING_FLAGS_VLAN))
640 vlan_gro_receive(&fp->napi, bp->vlgrp,
641 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
642 else
643#endif
644 napi_gro_receive(&fp->napi, skb);
645
646
647next_rx:
648 rx_buf->skb = NULL;
649
650 bd_cons = NEXT_RX_IDX(bd_cons);
651 bd_prod = NEXT_RX_IDX(bd_prod);
652 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
653 rx_pkt++;
654next_cqe:
655 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
656 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
657
658 if (rx_pkt == budget)
659 break;
660 } /* while */
661
662 fp->rx_bd_cons = bd_cons;
663 fp->rx_bd_prod = bd_prod_fw;
664 fp->rx_comp_cons = sw_comp_cons;
665 fp->rx_comp_prod = sw_comp_prod;
666
667 /* Update producers */
668 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
669 fp->rx_sge_prod);
670
671 fp->rx_pkt += rx_pkt;
672 fp->rx_calls++;
673
674 return rx_pkt;
675}
676
677static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
678{
679 struct bnx2x_fastpath *fp = fp_cookie;
680 struct bnx2x *bp = fp->bp;
681
682 /* Return here if interrupt is disabled */
683 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
684 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
685 return IRQ_HANDLED;
686 }
687
688 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
689 "[fp %d fw_sd %d igusb %d]\n",
690 fp->index, fp->fw_sb_id, fp->igu_sb_id);
691 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
692
693#ifdef BNX2X_STOP_ON_ERROR
694 if (unlikely(bp->panic))
695 return IRQ_HANDLED;
696#endif
697
698 /* Handle Rx and Tx according to MSI-X vector */
699 prefetch(fp->rx_cons_sb);
700 prefetch(fp->tx_cons_sb);
701 prefetch(&fp->sb_running_index[SM_RX_ID]);
702 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
703
704 return IRQ_HANDLED;
705}
706
707
708/* HW Lock for shared dual port PHYs */
709void bnx2x_acquire_phy_lock(struct bnx2x *bp)
710{
711 mutex_lock(&bp->port.phy_mutex);
712
713 if (bp->port.need_hw_lock)
714 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
715}
716
717void bnx2x_release_phy_lock(struct bnx2x *bp)
718{
719 if (bp->port.need_hw_lock)
720 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
721
722 mutex_unlock(&bp->port.phy_mutex);
723}
724
725void bnx2x_link_report(struct bnx2x *bp)
726{
727 if (bp->flags & MF_FUNC_DIS) {
728 netif_carrier_off(bp->dev);
729 netdev_err(bp->dev, "NIC Link is Down\n");
730 return;
731 }
732
733 if (bp->link_vars.link_up) {
734 u16 line_speed;
735
736 if (bp->state == BNX2X_STATE_OPEN)
737 netif_carrier_on(bp->dev);
738 netdev_info(bp->dev, "NIC Link is Up, ");
739
740 line_speed = bp->link_vars.line_speed;
741 if (IS_MF(bp)) {
742 u16 vn_max_rate;
743
744 vn_max_rate =
745 ((bp->mf_config[BP_VN(bp)] &
746 FUNC_MF_CFG_MAX_BW_MASK) >>
747 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
748 if (vn_max_rate < line_speed)
749 line_speed = vn_max_rate;
750 }
751 pr_cont("%d Mbps ", line_speed);
752
753 if (bp->link_vars.duplex == DUPLEX_FULL)
754 pr_cont("full duplex");
755 else
756 pr_cont("half duplex");
757
758 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
759 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
760 pr_cont(", receive ");
761 if (bp->link_vars.flow_ctrl &
762 BNX2X_FLOW_CTRL_TX)
763 pr_cont("& transmit ");
764 } else {
765 pr_cont(", transmit ");
766 }
767 pr_cont("flow control ON");
768 }
769 pr_cont("\n");
770
771 } else { /* link_down */
772 netif_carrier_off(bp->dev);
773 netdev_err(bp->dev, "NIC Link is Down\n");
774 }
775}
776
777/* Returns the number of actually allocated BDs */
778static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
779 int rx_ring_size)
780{
781 struct bnx2x *bp = fp->bp;
782 u16 ring_prod, cqe_ring_prod;
783 int i;
784
785 fp->rx_comp_cons = 0;
786 cqe_ring_prod = ring_prod = 0;
787 for (i = 0; i < rx_ring_size; i++) {
788 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
789 BNX2X_ERR("was only able to allocate "
790 "%d rx skbs on queue[%d]\n", i, fp->index);
791 fp->eth_q_stats.rx_skb_alloc_failed++;
792 break;
793 }
794 ring_prod = NEXT_RX_IDX(ring_prod);
795 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
796 WARN_ON(ring_prod <= i);
797 }
798
799 fp->rx_bd_prod = ring_prod;
800 /* Limit the CQE producer by the CQE ring size */
801 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
802 cqe_ring_prod);
803 fp->rx_pkt = fp->rx_calls = 0;
804
805 return i;
806}
807
808static inline void bnx2x_alloc_rx_bd_ring(struct bnx2x_fastpath *fp)
809{
810 struct bnx2x *bp = fp->bp;
811 int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
812 MAX_RX_AVAIL/bp->num_queues;
813
814 rx_ring_size = max_t(int, MIN_RX_AVAIL, rx_ring_size);
815
816 bnx2x_alloc_rx_bds(fp, rx_ring_size);
817
818 /* Warning!
819 * this will generate an interrupt (to the TSTORM)
820 * must only be done after chip is initialized
821 */
822 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
823 fp->rx_sge_prod);
824}
825
826void bnx2x_init_rx_rings(struct bnx2x *bp)
827{
828 int func = BP_FUNC(bp);
829 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
830 ETH_MAX_AGGREGATION_QUEUES_E1H;
831 u16 ring_prod;
832 int i, j;
833
834 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
835 BNX2X_FW_IP_HDR_ALIGN_PAD;
836
837 DP(NETIF_MSG_IFUP,
838 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
839
840 for_each_queue(bp, j) {
841 struct bnx2x_fastpath *fp = &bp->fp[j];
842
843 if (!fp->disable_tpa) {
844 for (i = 0; i < max_agg_queues; i++) {
845 fp->tpa_pool[i].skb =
846 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
847 if (!fp->tpa_pool[i].skb) {
848 BNX2X_ERR("Failed to allocate TPA "
849 "skb pool for queue[%d] - "
850 "disabling TPA on this "
851 "queue!\n", j);
852 bnx2x_free_tpa_pool(bp, fp, i);
853 fp->disable_tpa = 1;
854 break;
855 }
856 dma_unmap_addr_set((struct sw_rx_bd *)
857 &bp->fp->tpa_pool[i],
858 mapping, 0);
859 fp->tpa_state[i] = BNX2X_TPA_STOP;
860 }
861
862 /* "next page" elements initialization */
863 bnx2x_set_next_page_sgl(fp);
864
865 /* set SGEs bit mask */
866 bnx2x_init_sge_ring_bit_mask(fp);
867
868 /* Allocate SGEs and initialize the ring elements */
869 for (i = 0, ring_prod = 0;
870 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
871
872 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
873 BNX2X_ERR("was only able to allocate "
874 "%d rx sges\n", i);
875 BNX2X_ERR("disabling TPA for"
876 " queue[%d]\n", j);
877 /* Cleanup already allocated elements */
878 bnx2x_free_rx_sge_range(bp,
879 fp, ring_prod);
880 bnx2x_free_tpa_pool(bp,
881 fp, max_agg_queues);
882 fp->disable_tpa = 1;
883 ring_prod = 0;
884 break;
885 }
886 ring_prod = NEXT_SGE_IDX(ring_prod);
887 }
888
889 fp->rx_sge_prod = ring_prod;
890 }
891 }
892
893 for_each_queue(bp, j) {
894 struct bnx2x_fastpath *fp = &bp->fp[j];
895
896 fp->rx_bd_cons = 0;
897
898 bnx2x_set_next_page_rx_bd(fp);
899
900 /* CQ ring */
901 bnx2x_set_next_page_rx_cq(fp);
902
903 /* Allocate BDs and initialize BD ring */
904 bnx2x_alloc_rx_bd_ring(fp);
905
906 if (j != 0)
907 continue;
908
909 if (!CHIP_IS_E2(bp)) {
910 REG_WR(bp, BAR_USTRORM_INTMEM +
911 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
912 U64_LO(fp->rx_comp_mapping));
913 REG_WR(bp, BAR_USTRORM_INTMEM +
914 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
915 U64_HI(fp->rx_comp_mapping));
916 }
917 }
918}
919static void bnx2x_free_tx_skbs(struct bnx2x *bp)
920{
921 int i;
922
923 for_each_queue(bp, i) {
924 struct bnx2x_fastpath *fp = &bp->fp[i];
925
926 u16 bd_cons = fp->tx_bd_cons;
927 u16 sw_prod = fp->tx_pkt_prod;
928 u16 sw_cons = fp->tx_pkt_cons;
929
930 while (sw_cons != sw_prod) {
931 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
932 sw_cons++;
933 }
934 }
935}
936
937static void bnx2x_free_rx_skbs(struct bnx2x *bp)
938{
939 int i, j;
940
941 for_each_queue(bp, j) {
942 struct bnx2x_fastpath *fp = &bp->fp[j];
943
944 for (i = 0; i < NUM_RX_BD; i++) {
945 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
946 struct sk_buff *skb = rx_buf->skb;
947
948 if (skb == NULL)
949 continue;
950
951 dma_unmap_single(&bp->pdev->dev,
952 dma_unmap_addr(rx_buf, mapping),
953 bp->rx_buf_size, DMA_FROM_DEVICE);
954
955 rx_buf->skb = NULL;
956 dev_kfree_skb(skb);
957 }
958 if (!fp->disable_tpa)
959 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
960 ETH_MAX_AGGREGATION_QUEUES_E1 :
961 ETH_MAX_AGGREGATION_QUEUES_E1H);
962 }
963}
964
965void bnx2x_free_skbs(struct bnx2x *bp)
966{
967 bnx2x_free_tx_skbs(bp);
968 bnx2x_free_rx_skbs(bp);
969}
970
971static void bnx2x_free_msix_irqs(struct bnx2x *bp)
972{
973 int i, offset = 1;
974
975 free_irq(bp->msix_table[0].vector, bp->dev);
976 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
977 bp->msix_table[0].vector);
978
979#ifdef BCM_CNIC
980 offset++;
981#endif
982 for_each_queue(bp, i) {
983 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
984 "state %x\n", i, bp->msix_table[i + offset].vector,
985 bnx2x_fp(bp, i, state));
986
987 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
988 }
989}
990
991void bnx2x_free_irq(struct bnx2x *bp)
992{
993 if (bp->flags & USING_MSIX_FLAG)
994 bnx2x_free_msix_irqs(bp);
995 else if (bp->flags & USING_MSI_FLAG)
996 free_irq(bp->pdev->irq, bp->dev);
997 else
998 free_irq(bp->pdev->irq, bp->dev);
999}
1000
1001int bnx2x_enable_msix(struct bnx2x *bp)
1002{
1003 int msix_vec = 0, i, rc, req_cnt;
1004
1005 bp->msix_table[msix_vec].entry = msix_vec;
1006 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
1007 bp->msix_table[0].entry);
1008 msix_vec++;
1009
1010#ifdef BCM_CNIC
1011 bp->msix_table[msix_vec].entry = msix_vec;
1012 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
1013 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1014 msix_vec++;
1015#endif
1016 for_each_queue(bp, i) {
1017 bp->msix_table[msix_vec].entry = msix_vec;
1018 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
1019 "(fastpath #%u)\n", msix_vec, msix_vec, i);
1020 msix_vec++;
1021 }
1022
1023 req_cnt = BNX2X_NUM_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
1024
1025 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1026
1027 /*
1028 * reconfigure number of tx/rx queues according to available
1029 * MSI-X vectors
1030 */
1031 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
 1032 /* how many fewer vectors did we get than requested? */
1033 int diff = req_cnt - rc;
1034
1035 DP(NETIF_MSG_IFUP,
1036 "Trying to use less MSI-X vectors: %d\n", rc);
1037
1038 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1039
1040 if (rc) {
1041 DP(NETIF_MSG_IFUP,
1042 "MSI-X is not attainable rc %d\n", rc);
1043 return rc;
1044 }
1045 /*
1046 * decrease number of queues by number of unallocated entries
1047 */
1048 bp->num_queues -= diff;
1049
1050 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1051 bp->num_queues);
1052 } else if (rc) {
1053 /* fall to INTx if not enough memory */
1054 if (rc == -ENOMEM)
1055 bp->flags |= DISABLE_MSI_FLAG;
1056 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
1057 return rc;
1058 }
1059
1060 bp->flags |= USING_MSIX_FLAG;
1061
1062 return 0;
1063}
1064
1065static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1066{
1067 int i, rc, offset = 1;
1068
1069 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
1070 bp->dev->name, bp->dev);
1071 if (rc) {
1072 BNX2X_ERR("request sp irq failed\n");
1073 return -EBUSY;
1074 }
1075
1076#ifdef BCM_CNIC
1077 offset++;
1078#endif
1079 for_each_queue(bp, i) {
1080 struct bnx2x_fastpath *fp = &bp->fp[i];
1081 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1082 bp->dev->name, i);
1083
1084 rc = request_irq(bp->msix_table[offset].vector,
1085 bnx2x_msix_fp_int, 0, fp->name, fp);
1086 if (rc) {
1087 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
1088 bnx2x_free_msix_irqs(bp);
1089 return -EBUSY;
1090 }
1091
1092 offset++;
1093 fp->state = BNX2X_FP_STATE_IRQ;
1094 }
1095
1096 i = BNX2X_NUM_QUEUES(bp);
1097 offset = 1 + CNIC_CONTEXT_USE;
1098 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
1099 " ... fp[%d] %d\n",
1100 bp->msix_table[0].vector,
1101 0, bp->msix_table[offset].vector,
1102 i - 1, bp->msix_table[offset + i - 1].vector);
1103
1104 return 0;
1105}
1106
1107int bnx2x_enable_msi(struct bnx2x *bp)
1108{
1109 int rc;
1110
1111 rc = pci_enable_msi(bp->pdev);
1112 if (rc) {
1113 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1114 return -1;
1115 }
1116 bp->flags |= USING_MSI_FLAG;
1117
1118 return 0;
1119}
1120
1121static int bnx2x_req_irq(struct bnx2x *bp)
1122{
1123 unsigned long flags;
1124 int rc;
1125
1126 if (bp->flags & USING_MSI_FLAG)
1127 flags = 0;
1128 else
1129 flags = IRQF_SHARED;
1130
1131 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1132 bp->dev->name, bp->dev);
1133 if (!rc)
1134 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
1135
1136 return rc;
1137}
1138
1139static void bnx2x_napi_enable(struct bnx2x *bp)
1140{
1141 int i;
1142
1143 for_each_queue(bp, i)
1144 napi_enable(&bnx2x_fp(bp, i, napi));
1145}
1146
1147static void bnx2x_napi_disable(struct bnx2x *bp)
1148{
1149 int i;
1150
1151 for_each_queue(bp, i)
1152 napi_disable(&bnx2x_fp(bp, i, napi));
1153}
1154
1155void bnx2x_netif_start(struct bnx2x *bp)
1156{
1157 int intr_sem;
1158
1159 intr_sem = atomic_dec_and_test(&bp->intr_sem);
1160 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
1161
1162 if (intr_sem) {
1163 if (netif_running(bp->dev)) {
1164 bnx2x_napi_enable(bp);
1165 bnx2x_int_enable(bp);
1166 if (bp->state == BNX2X_STATE_OPEN)
1167 netif_tx_wake_all_queues(bp->dev);
1168 }
1169 }
1170}
1171
1172void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1173{
1174 bnx2x_int_disable_sync(bp, disable_hw);
1175 bnx2x_napi_disable(bp);
1176 netif_tx_disable(bp->dev);
1177}
1178
1179void bnx2x_set_num_queues(struct bnx2x *bp)
1180{
1181 switch (bp->multi_mode) {
1182 case ETH_RSS_MODE_DISABLED:
1183 bp->num_queues = 1;
1184 break;
1185 case ETH_RSS_MODE_REGULAR:
1186 bp->num_queues = bnx2x_calc_num_queues(bp);
1187 break;
1188 default:
1189 bp->num_queues = 1;
1190 break;
1191 }
1192}
1193
1194static void bnx2x_release_firmware(struct bnx2x *bp)
1195{
1196 kfree(bp->init_ops_offsets);
1197 kfree(bp->init_ops);
1198 kfree(bp->init_data);
1199 release_firmware(bp->firmware);
1200}
1201
1202/* must be called with rtnl_lock */
1203int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1204{
1205 u32 load_code;
1206 int i, rc;
1207
1208 /* Set init arrays */
1209 rc = bnx2x_init_firmware(bp);
1210 if (rc) {
1211 BNX2X_ERR("Error loading firmware\n");
1212 return rc;
1213 }
1214
1215#ifdef BNX2X_STOP_ON_ERROR
1216 if (unlikely(bp->panic))
1217 return -EPERM;
1218#endif
1219
1220 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1221
1222 /* must be called before memory allocation and HW init */
1223 bnx2x_ilt_set_info(bp);
1224
1225 if (bnx2x_alloc_mem(bp))
1226 return -ENOMEM;
1227
1228 netif_set_real_num_tx_queues(bp->dev, bp->num_queues);
1229 rc = netif_set_real_num_rx_queues(bp->dev, bp->num_queues);
1230 if (rc) {
1231 BNX2X_ERR("Unable to update real_num_rx_queues\n");
1232 goto load_error0;
1233 }
1234
1235 for_each_queue(bp, i)
1236 bnx2x_fp(bp, i, disable_tpa) =
1237 ((bp->flags & TPA_ENABLE_FLAG) == 0);
1238
1239 bnx2x_napi_enable(bp);
1240
1241 /* Send LOAD_REQUEST command to MCP
1242 Returns the type of LOAD command:
1243 if it is the first port to be initialized
1244 common blocks should be initialized, otherwise - not
1245 */
1246 if (!BP_NOMCP(bp)) {
1247 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
1248 if (!load_code) {
1249 BNX2X_ERR("MCP response failure, aborting\n");
1250 rc = -EBUSY;
1251 goto load_error1;
1252 }
1253 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1254 rc = -EBUSY; /* other port in diagnostic mode */
1255 goto load_error1;
1256 }
1257
1258 } else {
1259 int path = BP_PATH(bp);
1260 int port = BP_PORT(bp);
1261
1262 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
1263 path, load_count[path][0], load_count[path][1],
1264 load_count[path][2]);
1265 load_count[path][0]++;
1266 load_count[path][1 + port]++;
1267 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
1268 path, load_count[path][0], load_count[path][1],
1269 load_count[path][2]);
1270 if (load_count[path][0] == 1)
1271 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
1272 else if (load_count[path][1 + port] == 1)
1273 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1274 else
1275 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1276 }
1277
1278 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1279 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
1280 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
1281 bp->port.pmf = 1;
1282 else
1283 bp->port.pmf = 0;
1284 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1285
1286 /* Initialize HW */
1287 rc = bnx2x_init_hw(bp, load_code);
1288 if (rc) {
1289 BNX2X_ERR("HW init failed, aborting\n");
1290 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1291 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1292 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1293 goto load_error2;
1294 }
1295
1296 /* Connect to IRQs */
1297 rc = bnx2x_setup_irqs(bp);
1298 if (rc) {
1299 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1300 goto load_error2;
1301 }
1302
1303 /* Setup NIC internals and enable interrupts */
1304 bnx2x_nic_init(bp, load_code);
1305
1306 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1307 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
1308 (bp->common.shmem2_base))
1309 SHMEM2_WR(bp, dcc_support,
1310 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1311 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1312
1313 /* Send LOAD_DONE command to MCP */
1314 if (!BP_NOMCP(bp)) {
1315 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1316 if (!load_code) {
1317 BNX2X_ERR("MCP response failure, aborting\n");
1318 rc = -EBUSY;
1319 goto load_error3;
1320 }
1321 }
1322
1323 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1324
1325 rc = bnx2x_func_start(bp);
1326 if (rc) {
1327 BNX2X_ERR("Function start failed!\n");
1328#ifndef BNX2X_STOP_ON_ERROR
1329 goto load_error3;
1330#else
1331 bp->panic = 1;
1332 return -EBUSY;
1333#endif
1334 }
1335
1336 rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */);
1337 if (rc) {
1338 BNX2X_ERR("Setup leading failed!\n");
1339#ifndef BNX2X_STOP_ON_ERROR
1340 goto load_error3;
1341#else
1342 bp->panic = 1;
1343 return -EBUSY;
1344#endif
1345 }
1346
1347 if (!CHIP_IS_E1(bp) &&
1348 (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED)) {
1349 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
1350 bp->flags |= MF_FUNC_DIS;
1351 }
1352
1353#ifdef BCM_CNIC
1354 /* Enable Timer scan */
1355 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
1356#endif
1357 for_each_nondefault_queue(bp, i) {
1358 rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
1359 if (rc)
1360#ifdef BCM_CNIC
1361 goto load_error4;
1362#else
1363 goto load_error3;
1364#endif
1365 }
1366
1367 /* Now when Clients are configured we are ready to work */
1368 bp->state = BNX2X_STATE_OPEN;
1369
1370 bnx2x_set_eth_mac(bp, 1);
1371
1372 if (bp->port.pmf)
1373 bnx2x_initial_phy_init(bp, load_mode);
1374
1375 /* Start fast path */
1376 switch (load_mode) {
1377 case LOAD_NORMAL:
1378 /* Tx queue should be only reenabled */
1379 netif_tx_wake_all_queues(bp->dev);
1380 /* Initialize the receive filter. */
1381 bnx2x_set_rx_mode(bp->dev);
1382 break;
1383
1384 case LOAD_OPEN:
1385 netif_tx_start_all_queues(bp->dev);
1386 smp_mb__after_clear_bit();
1387 /* Initialize the receive filter. */
1388 bnx2x_set_rx_mode(bp->dev);
1389 break;
1390
1391 case LOAD_DIAG:
1392 /* Initialize the receive filter. */
1393 bnx2x_set_rx_mode(bp->dev);
1394 bp->state = BNX2X_STATE_DIAG;
1395 break;
1396
1397 default:
1398 break;
1399 }
1400
1401 if (!bp->port.pmf)
1402 bnx2x__link_status_update(bp);
1403
1404 /* start the timer */
1405 mod_timer(&bp->timer, jiffies + bp->current_interval);
1406
1407#ifdef BCM_CNIC
1408 bnx2x_setup_cnic_irq_info(bp);
1409 if (bp->state == BNX2X_STATE_OPEN)
1410 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1411#endif
1412 bnx2x_inc_load_cnt(bp);
1413
1414 bnx2x_release_firmware(bp);
1415
1416 return 0;
1417
1418#ifdef BCM_CNIC
1419load_error4:
1420 /* Disable Timer scan */
1421 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
1422#endif
1423load_error3:
1424 bnx2x_int_disable_sync(bp, 1);
1425
1426 /* Free SKBs, SGEs, TPA pool and driver internals */
1427 bnx2x_free_skbs(bp);
1428 for_each_queue(bp, i)
1429 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1430
1431 /* Release IRQs */
1432 bnx2x_free_irq(bp);
1433load_error2:
1434 if (!BP_NOMCP(bp)) {
1435 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1436 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1437 }
1438
1439 bp->port.pmf = 0;
1440load_error1:
1441 bnx2x_napi_disable(bp);
1442load_error0:
1443 bnx2x_free_mem(bp);
1444
1445 bnx2x_release_firmware(bp);
1446
1447 return rc;
1448}
1449
1450/* must be called with rtnl_lock */
1451int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1452{
1453 int i;
1454
1455 if (bp->state == BNX2X_STATE_CLOSED) {
1456 /* Interface has been removed - nothing to recover */
1457 bp->recovery_state = BNX2X_RECOVERY_DONE;
1458 bp->is_leader = 0;
1459 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
1460 smp_wmb();
1461
1462 return -EINVAL;
1463 }
1464
1465#ifdef BCM_CNIC
1466 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
1467#endif
1468 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
1469
1470 /* Set "drop all" */
1471 bp->rx_mode = BNX2X_RX_MODE_NONE;
1472 bnx2x_set_storm_rx_mode(bp);
1473
1474 /* Stop Tx */
1475 bnx2x_tx_disable(bp);
1476 del_timer_sync(&bp->timer);
1477 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
1478 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
1479 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1480
1481
1482 /* Cleanup the chip if needed */
1483 if (unload_mode != UNLOAD_RECOVERY)
1484 bnx2x_chip_cleanup(bp, unload_mode);
1485 else {
1486 /* Disable HW interrupts, NAPI and Tx */
1487 bnx2x_netif_stop(bp, 1);
1488
1489 /* Release IRQs */
1490 bnx2x_free_irq(bp);
1491 }
1492
1493 bp->port.pmf = 0;
1494
1495 /* Free SKBs, SGEs, TPA pool and driver internals */
1496 bnx2x_free_skbs(bp);
1497 for_each_queue(bp, i)
1498 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1499
1500 bnx2x_free_mem(bp);
1501
1502 bp->state = BNX2X_STATE_CLOSED;
1503
1504 /* The last driver must disable a "close the gate" if there is no
1505 * parity attention or "process kill" pending.
1506 */
1507 if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
1508 bnx2x_reset_is_done(bp))
1509 bnx2x_disable_close_the_gate(bp);
1510
1511 /* Reset MCP mail box sequence if there is on going recovery */
1512 if (unload_mode == UNLOAD_RECOVERY)
1513 bp->fw_seq = 0;
1514
1515 return 0;
1516}
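/* Program the PCI PM control register: D0 clears the power-state bits (with a delay when leaving D3hot); D3hot is entered only when this is the last user of the device, with PME enabled if WoL is configured */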
1517int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
1518{
1519 u16 pmcsr;
1520
1521 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
1522
1523 switch (state) {
1524 case PCI_D0:
1525 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1526 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
1527 PCI_PM_CTRL_PME_STATUS));
1528
1529 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
1530 /* delay required during transition out of D3hot */
1531 msleep(20);
1532 break;
1533
1534 case PCI_D3hot:
1535 /* If there are other clients above don't
1536 shut down the power */
1537 if (atomic_read(&bp->pdev->enable_cnt) != 1)
1538 return 0;
1539 /* Don't shut down the power for emulation and FPGA */
1540 if (CHIP_REV_IS_SLOW(bp))
1541 return 0;
1542
1543 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
1544 pmcsr |= 3;
1545
1546 if (bp->wol)
1547 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
1548
1549 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1550 pmcsr);
1551
1552 /* No more memory access after this point until
1553 * device is brought back to D0.
1554 */
1555 break;
1556
1557 default:
1558 return -EINVAL;
1559 }
1560 return 0;
1561}
1562
1563
1564
1565/*
1566 * net_device service functions
1567 */
1568
1569int bnx2x_poll(struct napi_struct *napi, int budget)
1570{
1571 int work_done = 0;
1572 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
1573 napi);
1574 struct bnx2x *bp = fp->bp;
1575
1576 while (1) {
1577#ifdef BNX2X_STOP_ON_ERROR
1578 if (unlikely(bp->panic)) {
1579 napi_complete(napi);
1580 return 0;
1581 }
1582#endif
1583
1584 if (bnx2x_has_tx_work(fp))
1585 bnx2x_tx_int(fp);
1586
1587 if (bnx2x_has_rx_work(fp)) {
1588 work_done += bnx2x_rx_int(fp, budget - work_done);
1589
1590 /* must not complete if we consumed full budget */
1591 if (work_done >= budget)
1592 break;
1593 }
1594
1595 /* Fall out from the NAPI loop if needed */
1596 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1597 bnx2x_update_fpsb_idx(fp);
1598 /* bnx2x_has_rx_work() reads the status block,
1599 * thus we need to ensure that status block indices
1600 * have been actually read (bnx2x_update_fpsb_idx)
1601 * prior to this check (bnx2x_has_rx_work) so that
1602 * we won't write the "newer" value of the status block
1603 * to IGU (if there was a DMA right after
1604 * bnx2x_has_rx_work and if there is no rmb, the memory
1605 * reading (bnx2x_update_fpsb_idx) may be postponed
1606 * to right before bnx2x_ack_sb). In this case there
1607 * will never be another interrupt until there is
1608 * another update of the status block, while there
1609 * is still unhandled work.
1610 */
1611 rmb();
1612
1613 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1614 napi_complete(napi);
1615 /* Re-enable interrupts */
1616 DP(NETIF_MSG_HW,
1617 "Update index to %d\n", fp->fp_hc_idx);
1618 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
1619 le16_to_cpu(fp->fp_hc_idx),
1620 IGU_INT_ENABLE, 1);
1621 break;
1622 }
1623 }
1624 }
1625
1626 return work_done;
1627}
1628
1629
1630/* we split the first BD into headers and data BDs
1631 * to ease the pain of our fellow microcode engineers
1632 * we use one mapping for both BDs
1633 * So far this has only been observed to happen
1634 * in Other Operating Systems(TM)
1635 */
1636static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
1637 struct bnx2x_fastpath *fp,
1638 struct sw_tx_bd *tx_buf,
1639 struct eth_tx_start_bd **tx_bd, u16 hlen,
1640 u16 bd_prod, int nbd)
1641{
1642 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
1643 struct eth_tx_bd *d_tx_bd;
1644 dma_addr_t mapping;
1645 int old_len = le16_to_cpu(h_tx_bd->nbytes);
1646
1647 /* first fix first BD */
1648 h_tx_bd->nbd = cpu_to_le16(nbd);
1649 h_tx_bd->nbytes = cpu_to_le16(hlen);
1650
1651 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
1652 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
1653 h_tx_bd->addr_lo, h_tx_bd->nbd);
1654
1655 /* now get a new data BD
1656 * (after the pbd) and fill it */
1657 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1658 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
1659
1660 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
1661 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
1662
1663 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1664 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1665 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
1666
1667 /* this marks the BD as one that has no individual mapping */
1668 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
1669
1670 DP(NETIF_MSG_TX_QUEUED,
1671 "TSO split data size is %d (%x:%x)\n",
1672 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
1673
1674 /* update tx_bd */
1675 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
1676
1677 return bd_prod;
1678}
1679
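/* Adjust a checksum that was computed from a transport header offset that is off by 'fix' bytes: fold out (fix > 0) or fold in (fix < 0) the contribution of those bytes, then byte-swap the result for the parsing BD */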
1680static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
1681{
1682 if (fix > 0)
1683 csum = (u16) ~csum_fold(csum_sub(csum,
1684 csum_partial(t_header - fix, fix, 0)));
1685
1686 else if (fix < 0)
1687 csum = (u16) ~csum_fold(csum_add(csum,
1688 csum_partial(t_header, -fix, 0)));
1689
1690 return swab16(csum);
1691}
1692
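/* Classify an outgoing skb: returns a mask of XMIT_* flags describing the required checksum offload (IPv4/IPv6, TCP or not) and the GSO type */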
1693static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
1694{
1695 u32 rc;
1696
1697 if (skb->ip_summed != CHECKSUM_PARTIAL)
1698 rc = XMIT_PLAIN;
1699
1700 else {
1701 if (skb->protocol == htons(ETH_P_IPV6)) {
1702 rc = XMIT_CSUM_V6;
1703 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1704 rc |= XMIT_CSUM_TCP;
1705
1706 } else {
1707 rc = XMIT_CSUM_V4;
1708 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1709 rc |= XMIT_CSUM_TCP;
1710 }
1711 }
1712
1713 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
1714 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
1715
1716 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
1717 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
1718
1719 return rc;
1720}
1721
1722#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1723/* check if packet requires linearization (packet is too fragmented)
1724 no need to check fragmentation if page size > 8K (there will be no
1725 violation to FW restrictions) */
1726static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
1727 u32 xmit_type)
1728{
1729 int to_copy = 0;
1730 int hlen = 0;
1731 int first_bd_sz = 0;
1732
1733 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
1734 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
1735
1736 if (xmit_type & XMIT_GSO) {
1737 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
1738 /* Check if LSO packet needs to be copied:
1739 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
1740 int wnd_size = MAX_FETCH_BD - 3;
1741 /* Number of windows to check */
1742 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
1743 int wnd_idx = 0;
1744 int frag_idx = 0;
1745 u32 wnd_sum = 0;
1746
1747 /* Headers length */
1748 hlen = (int)(skb_transport_header(skb) - skb->data) +
1749 tcp_hdrlen(skb);
1750
1751 /* Amount of data (w/o headers) on linear part of SKB*/
1752 first_bd_sz = skb_headlen(skb) - hlen;
1753
1754 wnd_sum = first_bd_sz;
1755
1756 /* Calculate the first sum - it's special */
1757 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
1758 wnd_sum +=
1759 skb_shinfo(skb)->frags[frag_idx].size;
1760
1761 /* If there was data on linear skb data - check it */
1762 if (first_bd_sz > 0) {
1763 if (unlikely(wnd_sum < lso_mss)) {
1764 to_copy = 1;
1765 goto exit_lbl;
1766 }
1767
1768 wnd_sum -= first_bd_sz;
1769 }
1770
1771 /* Others are easier: run through the frag list and
1772 check all windows */
1773 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
1774 wnd_sum +=
1775 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
1776
1777 if (unlikely(wnd_sum < lso_mss)) {
1778 to_copy = 1;
1779 break;
1780 }
1781 wnd_sum -=
1782 skb_shinfo(skb)->frags[wnd_idx].size;
1783 }
1784 } else {
1785 /* in non-LSO too fragmented packet should always
1786 be linearized */
1787 to_copy = 1;
1788 }
1789 }
1790
1791exit_lbl:
1792 if (unlikely(to_copy))
1793 DP(NETIF_MSG_TX_QUEUED,
1794 "Linearization IS REQUIRED for %s packet. "
1795 "num_frags %d hlen %d first_bd_sz %d\n",
1796 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
1797 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
1798
1799 return to_copy;
1800}
1801#endif
1802
1803static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb,
1804 struct eth_tx_parse_bd_e2 *pbd,
1805 u32 xmit_type)
1806{
1807 pbd->parsing_data |= cpu_to_le16(skb_shinfo(skb)->gso_size) <<
1808 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT;
1809 if ((xmit_type & XMIT_GSO_V6) &&
1810 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
1811 pbd->parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
1812}
1813
1814/**
1815 * Update PBD in GSO case.
1816 *
1817 * @param skb
1818 * @param tx_start_bd
1819 * @param pbd
1820 * @param xmit_type
1821 */
1822static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
1823 struct eth_tx_parse_bd_e1x *pbd,
1824 u32 xmit_type)
1825{
1826 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1827 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
1828 pbd->tcp_flags = pbd_tcp_flags(skb);
1829
1830 if (xmit_type & XMIT_GSO_V4) {
1831 pbd->ip_id = swab16(ip_hdr(skb)->id);
1832 pbd->tcp_pseudo_csum =
1833 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
1834 ip_hdr(skb)->daddr,
1835 0, IPPROTO_TCP, 0));
1836
1837 } else
1838 pbd->tcp_pseudo_csum =
1839 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1840 &ipv6_hdr(skb)->daddr,
1841 0, IPPROTO_TCP, 0));
1842
1843 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
1844}
1845/**
1846 *
1847 * @param skb
1848 * @param tx_start_bd
1849 * @param pbd_e2
1850 * @param xmit_type
1851 *
1852 * @return header len
1853 */
1854static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
1855 struct eth_tx_parse_bd_e2 *pbd,
1856 u32 xmit_type)
1857{
1858 pbd->parsing_data |= cpu_to_le16(tcp_hdrlen(skb)/4) <<
1859 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT;
1860
1861 pbd->parsing_data |= cpu_to_le16(((unsigned char *)tcp_hdr(skb) -
1862 skb->data) / 2) <<
1863 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT;
1864
1865 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
1866}
1867
1868/**
1869 *
1870 * @param skb
1871 * @param tx_start_bd
1872 * @param pbd
1873 * @param xmit_type
1874 *
1875 * @return Header length
1876 */
1877static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
1878 struct eth_tx_parse_bd_e1x *pbd,
1879 u32 xmit_type)
1880{
1881 u8 hlen = (skb_network_header(skb) - skb->data) / 2;
1882
1883 /* for now NS flag is not used in Linux */
1884 pbd->global_data =
1885 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
1886 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
1887
1888 pbd->ip_hlen_w = (skb_transport_header(skb) -
1889 skb_network_header(skb)) / 2;
1890
1891 hlen += pbd->ip_hlen_w + tcp_hdrlen(skb) / 2;
1892
1893 pbd->total_hlen_w = cpu_to_le16(hlen);
1894 hlen = hlen*2;
1895
1896 if (xmit_type & XMIT_CSUM_TCP) {
1897 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
1898
1899 } else {
1900 s8 fix = SKB_CS_OFF(skb); /* signed! */
1901
1902 DP(NETIF_MSG_TX_QUEUED,
1903 "hlen %d fix %d csum before fix %x\n",
1904 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
1905
1906 /* HW bug: fixup the CSUM */
1907 pbd->tcp_pseudo_csum =
1908 bnx2x_csum_fix(skb_transport_header(skb),
1909 SKB_CS(skb), fix);
1910
1911 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
1912 pbd->tcp_pseudo_csum);
1913 }
1914
1915 return hlen;
1916}
1917/* called with netif_tx_lock
1918 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
1919 * netif_wake_queue()
1920 */
1921netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
1922{
1923 struct bnx2x *bp = netdev_priv(dev);
1924 struct bnx2x_fastpath *fp;
1925 struct netdev_queue *txq;
1926 struct sw_tx_bd *tx_buf;
1927 struct eth_tx_start_bd *tx_start_bd;
1928 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
1929 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
1930 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
1931 u16 pkt_prod, bd_prod;
1932 int nbd, fp_index;
1933 dma_addr_t mapping;
1934 u32 xmit_type = bnx2x_xmit_type(bp, skb);
1935 int i;
1936 u8 hlen = 0;
1937 __le16 pkt_size = 0;
1938 struct ethhdr *eth;
1939 u8 mac_type = UNICAST_ADDRESS;
1940
1941#ifdef BNX2X_STOP_ON_ERROR
1942 if (unlikely(bp->panic))
1943 return NETDEV_TX_BUSY;
1944#endif
1945
1946 fp_index = skb_get_queue_mapping(skb);
1947 txq = netdev_get_tx_queue(dev, fp_index);
1948
1949 fp = &bp->fp[fp_index];
1950
1951 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
1952 fp->eth_q_stats.driver_xoff++;
1953 netif_tx_stop_queue(txq);
1954 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
1955 return NETDEV_TX_BUSY;
1956 }
1957
1958 DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
1959 "protocol(%x,%x) gso type %x xmit_type %x\n",
1960 fp_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
1961 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
1962
1963 eth = (struct ethhdr *)skb->data;
1964
1965 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
1966 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
1967 if (is_broadcast_ether_addr(eth->h_dest))
1968 mac_type = BROADCAST_ADDRESS;
1969 else
1970 mac_type = MULTICAST_ADDRESS;
1971 }
1972
1973#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1974 /* First, check if we need to linearize the skb (due to FW
1975 restrictions). No need to check fragmentation if page size > 8K
1976 (there will be no violation to FW restrictions) */
1977 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
1978 /* Statistics of linearization */
1979 bp->lin_cnt++;
1980 if (skb_linearize(skb) != 0) {
1981 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
1982 "silently dropping this SKB\n");
1983 dev_kfree_skb_any(skb);
1984 return NETDEV_TX_OK;
1985 }
1986 }
1987#endif
1988
1989 /*
1990 Please read carefully. First we use one BD which we mark as start,
1991 then we have a parsing info BD (used for TSO or xsum),
1992 and only then we have the rest of the TSO BDs.
1993 (don't forget to mark the last one as last,
1994 and to unmap only AFTER you write to the BD ...)
1995 And above all, all pbd sizes are in words - NOT DWORDS!
1996 */
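 /* Rough sketch of the BD chain described above (typical TSO case):
  *
  *   start BD -> parsing BD -> [optional TSO split-header BD] ->
  *   fragment BD -> ... -> fragment BD (marked as last)
  *
  * with the parsing BD length fields given in 16-bit words.
  */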
1997
1998 pkt_prod = fp->tx_pkt_prod++;
1999 bd_prod = TX_BD(fp->tx_bd_prod);
2000
2001 /* get a tx_buf and first BD */
2002 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
2003 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
2004
2005 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
523224a3
DK
2006 SET_FLAG(tx_start_bd->general_data,
2007 ETH_TX_START_BD_ETH_ADDR_TYPE,
2008 mac_type);
9f6c9258 2009 /* header nbd */
523224a3
DK
2010 SET_FLAG(tx_start_bd->general_data,
2011 ETH_TX_START_BD_HDR_NBDS,
2012 1);
9f6c9258
DK
2013
2014 /* remember the first BD of the packet */
2015 tx_buf->first_bd = fp->tx_bd_prod;
2016 tx_buf->skb = skb;
2017 tx_buf->flags = 0;
2018
2019 DP(NETIF_MSG_TX_QUEUED,
2020 "sending pkt %u @%p next_idx %u bd %u @%p\n",
2021 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
2022
2023#ifdef BCM_VLAN
2024 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
2025 (bp->flags & HW_VLAN_TX_FLAG)) {
523224a3
DK
2026 tx_start_bd->vlan_or_ethertype =
2027 cpu_to_le16(vlan_tx_tag_get(skb));
2028 tx_start_bd->bd_flags.as_bitfield |=
2029 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
9f6c9258
DK
2030 } else
2031#endif
523224a3 2032 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
9f6c9258
DK
2033
2034 /* turn on parsing and get a BD */
2035 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9f6c9258 2036
523224a3
DK
2037 if (xmit_type & XMIT_CSUM) {
2038 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2039
2040 if (xmit_type & XMIT_CSUM_V4)
2041 tx_start_bd->bd_flags.as_bitfield |=
2042 ETH_TX_BD_FLAGS_IP_CSUM;
2043 else
2044 tx_start_bd->bd_flags.as_bitfield |=
2045 ETH_TX_BD_FLAGS_IPV6;
9f6c9258 2046
523224a3
DK
2047 if (!(xmit_type & XMIT_CSUM_TCP))
2048 tx_start_bd->bd_flags.as_bitfield |=
2049 ETH_TX_BD_FLAGS_IS_UDP;
2050 }
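 /* As coded above: L4_CSUM is set for any checksum offload, IP_CSUM
  * is added for IPv4 and IPV6 for IPv6 packets, and IS_UDP marks the
  * case where the offloaded L4 protocol is not TCP.
  */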
9f6c9258 2051
f2e0899f
DK
2052 if (CHIP_IS_E2(bp)) {
2053 pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2;
2054 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2055 /* Set PBD in checksum offload case */
2056 if (xmit_type & XMIT_CSUM)
2057 hlen = bnx2x_set_pbd_csum_e2(bp,
2058 skb, pbd_e2, xmit_type);
2059 } else {
2060 pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
2061 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2062 /* Set PBD in checksum offload case */
2063 if (xmit_type & XMIT_CSUM)
2064 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
9f6c9258 2065
9f6c9258
DK
2066 }
2067
2068 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2069 skb_headlen(skb), DMA_TO_DEVICE);
2070
2071 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2072 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2073 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
2074 tx_start_bd->nbd = cpu_to_le16(nbd);
2075 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2076 pkt_size = tx_start_bd->nbytes;
2077
2078 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
2079 " nbytes %d flags %x vlan %x\n",
2080 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2081 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
523224a3
DK
2082 tx_start_bd->bd_flags.as_bitfield,
2083 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
9f6c9258
DK
2084
2085 if (xmit_type & XMIT_GSO) {
2086
2087 DP(NETIF_MSG_TX_QUEUED,
2088 "TSO packet len %d hlen %d total len %d tso size %d\n",
2089 skb->len, hlen, skb_headlen(skb),
2090 skb_shinfo(skb)->gso_size);
2091
2092 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2093
2094 if (unlikely(skb_headlen(skb) > hlen))
2095 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2096 hlen, bd_prod, ++nbd);
f2e0899f
DK
2097 if (CHIP_IS_E2(bp))
2098 bnx2x_set_pbd_gso_e2(skb, pbd_e2, xmit_type);
2099 else
2100 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
9f6c9258
DK
2101 }
2102 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2103
2104 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2105 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2106
2107 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2108 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2109 if (total_pkt_bd == NULL)
2110 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2111
2112 mapping = dma_map_page(&bp->pdev->dev, frag->page,
2113 frag->page_offset,
2114 frag->size, DMA_TO_DEVICE);
2115
2116 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2117 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2118 tx_data_bd->nbytes = cpu_to_le16(frag->size);
2119 le16_add_cpu(&pkt_size, frag->size);
2120
2121 DP(NETIF_MSG_TX_QUEUED,
2122 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
2123 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2124 le16_to_cpu(tx_data_bd->nbytes));
2125 }
2126
2127 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2128
2129 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2130
2131 /* now send a tx doorbell, counting the next BD
2132 * if the packet contains or ends with it
2133 */
2134 if (TX_BD_POFF(bd_prod) < nbd)
2135 nbd++;
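 /* Presumably this counts the "next page" BD that terminates each BD
  * ring page: if the producer wrapped into a new page while this
  * packet was built, that extra BD is billed to the packet as well.
  */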
2136
2137 if (total_pkt_bd != NULL)
2138 total_pkt_bd->total_pkt_bytes = pkt_size;
2139
523224a3 2140 if (pbd_e1x)
9f6c9258 2141 DP(NETIF_MSG_TX_QUEUED,
523224a3 2142 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9f6c9258 2143 " tcp_flags %x xsum %x seq %u hlen %u\n",
523224a3
DK
2144 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2145 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2146 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2147 le16_to_cpu(pbd_e1x->total_hlen_w));
f2e0899f
DK
2148 if (pbd_e2)
2149 DP(NETIF_MSG_TX_QUEUED,
2150 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
2151 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2152 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2153 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2154 pbd_e2->parsing_data);
9f6c9258
DK
2155 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2156
2157 /*
2158 * Make sure that the BD data is updated before updating the producer
2159 * since FW might read the BD right after the producer is updated.
2160 * This is only applicable for weak-ordered memory model archs such
2161 * as IA-64. The following barrier is also mandatory since FW will
2162 * assume packets must have BDs.
2163 */
2164 wmb();
2165
2166 fp->tx_db.data.prod += nbd;
2167 barrier();
523224a3 2168 DOORBELL(bp, fp->cid, fp->tx_db.raw);
9f6c9258
DK
2169
2170 mmiowb();
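 /* Presumed intent of the three barriers above: wmb() publishes the
  * BD contents before the producer is advanced, barrier() keeps the
  * compiler from reordering the tx_db.data.prod update past the
  * doorbell write, and mmiowb() orders the posted doorbell against a
  * later unlock as seen by another CPU.
  */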
2171
2172 fp->tx_bd_prod += nbd;
2173
2174 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
2175 netif_tx_stop_queue(txq);
2176
2177 /* The paired memory barrier is in bnx2x_tx_int(); we have to keep
2178 * the ordering between set_bit() in netif_tx_stop_queue() and the
2179 * read of fp->tx_bd_cons */
2180 smp_mb();
2181
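 /* The re-check below closes the classic stop/wake race: if
  * bnx2x_tx_int() freed BDs between the availability test above and
  * netif_tx_stop_queue(), it may have skipped the wake-up, so wake
  * the queue here once enough room is seen again.
  */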
2182 fp->eth_q_stats.driver_xoff++;
2183 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
2184 netif_tx_wake_queue(txq);
2185 }
2186 fp->tx_pkt++;
2187
2188 return NETDEV_TX_OK;
2189}
2190/* called with rtnl_lock */
2191int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2192{
2193 struct sockaddr *addr = p;
2194 struct bnx2x *bp = netdev_priv(dev);
2195
2196 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2197 return -EINVAL;
2198
2199 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
523224a3
DK
2200 if (netif_running(dev))
2201 bnx2x_set_eth_mac(bp, 1);
9f6c9258
DK
2202
2203 return 0;
2204}
2205
d6214d7a
DK
2206
2207int bnx2x_setup_irqs(struct bnx2x *bp)
2208{
2209 int rc = 0;
2210 if (bp->flags & USING_MSIX_FLAG) {
2211 rc = bnx2x_req_msix_irqs(bp);
2212 if (rc)
2213 return rc;
2214 } else {
2215 bnx2x_ack_int(bp);
2216 rc = bnx2x_req_irq(bp);
2217 if (rc) {
2218 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
2219 return rc;
2220 }
2221 if (bp->flags & USING_MSI_FLAG) {
2222 bp->dev->irq = bp->pdev->irq;
2223 netdev_info(bp->dev, "using MSI IRQ %d\n",
2224 bp->pdev->irq);
2225 }
2226 }
2227
2228 return 0;
2229}
2230
523224a3
DK
2231void bnx2x_free_mem_bp(struct bnx2x *bp)
2232{
2233 kfree(bp->fp);
2234 kfree(bp->msix_table);
2235 kfree(bp->ilt);
2236}
2237
2238int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
2239{
2240 struct bnx2x_fastpath *fp;
2241 struct msix_entry *tbl;
2242 struct bnx2x_ilt *ilt;
2243
2244 /* fp array */
2245 fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
2246 if (!fp)
2247 goto alloc_err;
2248 bp->fp = fp;
2249
2250 /* msix table */
2251 tbl = kzalloc((bp->l2_cid_count + 1) * sizeof(*tbl),
2252 GFP_KERNEL);
2253 if (!tbl)
2254 goto alloc_err;
2255 bp->msix_table = tbl;
2256
2257 /* ilt */
2258 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
2259 if (!ilt)
2260 goto alloc_err;
2261 bp->ilt = ilt;
2262
2263 return 0;
2264alloc_err:
2265 bnx2x_free_mem_bp(bp);
2266 return -ENOMEM;
2267
2268}
2269
9f6c9258
DK
2270/* called with rtnl_lock */
2271int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
2272{
2273 struct bnx2x *bp = netdev_priv(dev);
2274 int rc = 0;
2275
2276 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2277 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2278 return -EAGAIN;
2279 }
2280
2281 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
2282 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
2283 return -EINVAL;
2284
2285 /* This does not race with packet allocation
2286 * because the actual alloc size is
2287 * only updated as part of load
2288 */
2289 dev->mtu = new_mtu;
2290
2291 if (netif_running(dev)) {
2292 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2293 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
2294 }
2295
2296 return rc;
2297}
2298
2299void bnx2x_tx_timeout(struct net_device *dev)
2300{
2301 struct bnx2x *bp = netdev_priv(dev);
2302
2303#ifdef BNX2X_STOP_ON_ERROR
2304 if (!bp->panic)
2305 bnx2x_panic();
2306#endif
2307 /* This allows the netif to be shutdown gracefully before resetting */
2308 schedule_delayed_work(&bp->reset_task, 0);
2309}
2310
2311#ifdef BCM_VLAN
2312/* called with rtnl_lock */
2313void bnx2x_vlan_rx_register(struct net_device *dev,
2314 struct vlan_group *vlgrp)
2315{
2316 struct bnx2x *bp = netdev_priv(dev);
2317
2318 bp->vlgrp = vlgrp;
9f6c9258
DK
2319}
2320
2321#endif
2322int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
2323{
2324 struct net_device *dev = pci_get_drvdata(pdev);
2325 struct bnx2x *bp;
2326
2327 if (!dev) {
2328 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2329 return -ENODEV;
2330 }
2331 bp = netdev_priv(dev);
2332
2333 rtnl_lock();
2334
2335 pci_save_state(pdev);
2336
2337 if (!netif_running(dev)) {
2338 rtnl_unlock();
2339 return 0;
2340 }
2341
2342 netif_device_detach(dev);
2343
2344 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2345
2346 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
2347
2348 rtnl_unlock();
2349
2350 return 0;
2351}
2352
2353int bnx2x_resume(struct pci_dev *pdev)
2354{
2355 struct net_device *dev = pci_get_drvdata(pdev);
2356 struct bnx2x *bp;
2357 int rc;
2358
2359 if (!dev) {
2360 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2361 return -ENODEV;
2362 }
2363 bp = netdev_priv(dev);
2364
2365 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2366 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2367 return -EAGAIN;
2368 }
2369
2370 rtnl_lock();
2371
2372 pci_restore_state(pdev);
2373
2374 if (!netif_running(dev)) {
2375 rtnl_unlock();
2376 return 0;
2377 }
2378
2379 bnx2x_set_power_state(bp, PCI_D0);
2380 netif_device_attach(dev);
2381
f2e0899f
DK
2382 /* Since the chip was reset, clear the FW sequence number */
2383 bp->fw_seq = 0;
9f6c9258
DK
2384 rc = bnx2x_nic_load(bp, LOAD_OPEN);
2385
2386 rtnl_unlock();
2387
2388 return rc;
2389}