1 /* bnx2.c: Broadcom NX2 network driver.
2 *
3 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
54
55 #define DRV_MODULE_NAME "bnx2"
56 #define PFX DRV_MODULE_NAME ": "
57 #define DRV_MODULE_VERSION "1.5.7"
58 #define DRV_MODULE_RELDATE "March 29, 2007"
59
60 #define RUN_AT(x) (jiffies + (x))
61
62 /* Time in jiffies before concluding the transmitter is hung. */
63 #define TX_TIMEOUT (5*HZ)
64
65 static const char version[] __devinitdata =
66 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
67
68 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
69 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
70 MODULE_LICENSE("GPL");
71 MODULE_VERSION(DRV_MODULE_VERSION);
72
73 static int disable_msi = 0;
74
75 module_param(disable_msi, int, 0);
76 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
77
78 typedef enum {
79 BCM5706 = 0,
80 NC370T,
81 NC370I,
82 BCM5706S,
83 NC370F,
84 BCM5708,
85 BCM5708S,
86 BCM5709,
87 } board_t;
88
89 /* indexed by board_t, above */
90 static const struct {
91 char *name;
92 } board_info[] __devinitdata = {
93 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
94 { "HP NC370T Multifunction Gigabit Server Adapter" },
95 { "HP NC370i Multifunction Gigabit Server Adapter" },
96 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
97 { "HP NC370F Multifunction Gigabit Server Adapter" },
98 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
99 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
100 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
101 };
102
103 static struct pci_device_id bnx2_pci_tbl[] = {
104 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
105 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
106 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
107 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
111 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
113 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
115 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
117 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
120 { 0, }
121 };
122
123 static struct flash_spec flash_table[] =
124 {
125 /* Slow EEPROM */
126 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
127 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
128 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
129 "EEPROM - slow"},
130 /* Expansion entry 0001 */
131 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
132 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
133 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
134 "Entry 0001"},
135 /* Saifun SA25F010 (non-buffered flash) */
136 /* strap, cfg1, & write1 need updates */
137 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
138 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
139 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
140 "Non-buffered flash (128kB)"},
141 /* Saifun SA25F020 (non-buffered flash) */
142 /* strap, cfg1, & write1 need updates */
143 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
144 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
145 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
146 "Non-buffered flash (256kB)"},
147 /* Expansion entry 0100 */
148 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
149 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
150 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
151 "Entry 0100"},
152 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
153 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
154 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
155 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
156 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
157 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
158 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
159 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
160 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
161 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
162 /* Saifun SA25F005 (non-buffered flash) */
163 /* strap, cfg1, & write1 need updates */
164 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
165 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
166 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
167 "Non-buffered flash (64kB)"},
168 /* Fast EEPROM */
169 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
170 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
171 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
172 "EEPROM - fast"},
173 /* Expansion entry 1001 */
174 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
175 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
176 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
177 "Entry 1001"},
178 /* Expansion entry 1010 */
179 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
180 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
181 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
182 "Entry 1010"},
183 /* ATMEL AT45DB011B (buffered flash) */
184 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
185 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
186 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
187 "Buffered flash (128kB)"},
188 /* Expansion entry 1100 */
189 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
190 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
191 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
192 "Entry 1100"},
193 /* Expansion entry 1101 */
194 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
195 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
196 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
197 "Entry 1101"},
198 	/* Atmel Expansion entry 1110 */
199 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
200 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
201 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
202 "Entry 1110 (Atmel)"},
203 /* ATMEL AT45DB021B (buffered flash) */
204 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
205 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
206 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
207 "Buffered flash (256kB)"},
208 };
209
210 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
211
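/* Return the number of free TX descriptors.  The smp_mb() makes
 * sure tx_prod and tx_cons are read fresh with respect to the
 * barrier in the TX completion path.
 */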
212 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
213 {
214 u32 diff;
215
216 smp_mb();
217
218 	/* The ring uses 256 indices for 255 entries; one of them
219 	 * must always be skipped.
220 	 */
221 diff = bp->tx_prod - bp->tx_cons;
222 if (unlikely(diff >= TX_DESC_CNT)) {
223 diff &= 0xffff;
224 if (diff == TX_DESC_CNT)
225 diff = MAX_TX_DESC_CNT;
226 }
227 return (bp->tx_ring_size - diff);
228 }
229
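/* Indirect register access through the PCICFG register window,
 * used for areas such as the firmware shared memory that are not
 * read or written through the normal memory-mapped path.
 */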
230 static u32
231 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
232 {
233 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
234 return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
235 }
236
237 static void
238 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
239 {
240 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
241 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
242 }
243
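/* Write one word of context memory.  The 5709 uses the
 * CTX_CTX_DATA/CTX_CTX_CTRL handshake and polls for the write
 * request to complete; older chips write through the data window.
 */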
244 static void
245 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
246 {
247 offset += cid_addr;
248 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
249 int i;
250
251 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
252 REG_WR(bp, BNX2_CTX_CTX_CTRL,
253 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
254 for (i = 0; i < 5; i++) {
255 u32 val;
256 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
257 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
258 break;
259 udelay(5);
260 }
261 } else {
262 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
263 REG_WR(bp, BNX2_CTX_DATA, val);
264 }
265 }
266
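/* Read a PHY register over MDIO.  If hardware auto-polling is
 * enabled, it is turned off around the manual access and restored
 * afterwards.
 */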
267 static int
268 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
269 {
270 u32 val1;
271 int i, ret;
272
273 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
274 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
275 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
276
277 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
278 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
279
280 udelay(40);
281 }
282
283 val1 = (bp->phy_addr << 21) | (reg << 16) |
284 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
285 BNX2_EMAC_MDIO_COMM_START_BUSY;
286 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
287
288 for (i = 0; i < 50; i++) {
289 udelay(10);
290
291 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
292 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
293 udelay(5);
294
295 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
296 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
297
298 break;
299 }
300 }
301
302 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
303 *val = 0x0;
304 ret = -EBUSY;
305 }
306 else {
307 *val = val1;
308 ret = 0;
309 }
310
311 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
312 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
313 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
314
315 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
316 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
317
318 udelay(40);
319 }
320
321 return ret;
322 }
323
324 static int
325 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
326 {
327 u32 val1;
328 int i, ret;
329
330 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
331 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
332 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
333
334 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
335 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
336
337 udelay(40);
338 }
339
340 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
341 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
342 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
343 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
344
345 for (i = 0; i < 50; i++) {
346 udelay(10);
347
348 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
349 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
350 udelay(5);
351 break;
352 }
353 }
354
355 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
356 ret = -EBUSY;
357 else
358 ret = 0;
359
360 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
361 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
362 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
363
364 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
365 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
366
367 udelay(40);
368 }
369
370 return ret;
371 }
372
373 static void
374 bnx2_disable_int(struct bnx2 *bp)
375 {
376 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
377 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
378 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
379 }
380
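/* Unmask interrupts: the first write acks the current status index
 * with interrupts still masked, the second write unmasks them, and
 * COAL_NOW requests an immediate coalescing event so any pending
 * status block update is delivered.
 */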
381 static void
382 bnx2_enable_int(struct bnx2 *bp)
383 {
384 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
385 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
386 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
387
388 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
389 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
390
391 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
392 }
393
394 static void
395 bnx2_disable_int_sync(struct bnx2 *bp)
396 {
397 atomic_inc(&bp->intr_sem);
398 bnx2_disable_int(bp);
399 synchronize_irq(bp->pdev->irq);
400 }
401
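/* Quiesce the NIC: bump intr_sem so the ISR ignores interrupts,
 * mask them in hardware, wait for in-flight handlers, then stop
 * the poll and TX paths.  bnx2_netif_start() reverses this.
 */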
402 static void
403 bnx2_netif_stop(struct bnx2 *bp)
404 {
405 bnx2_disable_int_sync(bp);
406 if (netif_running(bp->dev)) {
407 netif_poll_disable(bp->dev);
408 netif_tx_disable(bp->dev);
409 bp->dev->trans_start = jiffies; /* prevent tx timeout */
410 }
411 }
412
413 static void
414 bnx2_netif_start(struct bnx2 *bp)
415 {
416 if (atomic_dec_and_test(&bp->intr_sem)) {
417 if (netif_running(bp->dev)) {
418 netif_wake_queue(bp->dev);
419 netif_poll_enable(bp->dev);
420 bnx2_enable_int(bp);
421 }
422 }
423 }
424
425 static void
426 bnx2_free_mem(struct bnx2 *bp)
427 {
428 int i;
429
430 for (i = 0; i < bp->ctx_pages; i++) {
431 if (bp->ctx_blk[i]) {
432 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
433 bp->ctx_blk[i],
434 bp->ctx_blk_mapping[i]);
435 bp->ctx_blk[i] = NULL;
436 }
437 }
438 if (bp->status_blk) {
439 pci_free_consistent(bp->pdev, bp->status_stats_size,
440 bp->status_blk, bp->status_blk_mapping);
441 bp->status_blk = NULL;
442 bp->stats_blk = NULL;
443 }
444 if (bp->tx_desc_ring) {
445 pci_free_consistent(bp->pdev,
446 sizeof(struct tx_bd) * TX_DESC_CNT,
447 bp->tx_desc_ring, bp->tx_desc_mapping);
448 bp->tx_desc_ring = NULL;
449 }
450 kfree(bp->tx_buf_ring);
451 bp->tx_buf_ring = NULL;
452 for (i = 0; i < bp->rx_max_ring; i++) {
453 if (bp->rx_desc_ring[i])
454 pci_free_consistent(bp->pdev,
455 sizeof(struct rx_bd) * RX_DESC_CNT,
456 bp->rx_desc_ring[i],
457 bp->rx_desc_mapping[i]);
458 bp->rx_desc_ring[i] = NULL;
459 }
460 vfree(bp->rx_buf_ring);
461 bp->rx_buf_ring = NULL;
462 }
463
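/* Allocate all DMA memory for the device: the TX and RX rings, the
 * combined status + statistics block, and (on the 5709) the context
 * memory pages.  Any failure unwinds through bnx2_free_mem().
 */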
464 static int
465 bnx2_alloc_mem(struct bnx2 *bp)
466 {
467 int i, status_blk_size;
468
469 bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
470 GFP_KERNEL);
471 if (bp->tx_buf_ring == NULL)
472 return -ENOMEM;
473
474 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
475 sizeof(struct tx_bd) *
476 TX_DESC_CNT,
477 &bp->tx_desc_mapping);
478 if (bp->tx_desc_ring == NULL)
479 goto alloc_mem_err;
480
481 bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
482 bp->rx_max_ring);
483 if (bp->rx_buf_ring == NULL)
484 goto alloc_mem_err;
485
486 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
487 bp->rx_max_ring);
488
489 for (i = 0; i < bp->rx_max_ring; i++) {
490 bp->rx_desc_ring[i] =
491 pci_alloc_consistent(bp->pdev,
492 sizeof(struct rx_bd) * RX_DESC_CNT,
493 &bp->rx_desc_mapping[i]);
494 if (bp->rx_desc_ring[i] == NULL)
495 goto alloc_mem_err;
496
497 }
498
499 /* Combine status and statistics blocks into one allocation. */
500 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
501 bp->status_stats_size = status_blk_size +
502 sizeof(struct statistics_block);
503
504 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
505 &bp->status_blk_mapping);
506 if (bp->status_blk == NULL)
507 goto alloc_mem_err;
508
509 memset(bp->status_blk, 0, bp->status_stats_size);
510
511 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
512 status_blk_size);
513
514 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
515
516 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
517 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
518 if (bp->ctx_pages == 0)
519 bp->ctx_pages = 1;
520 for (i = 0; i < bp->ctx_pages; i++) {
521 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
522 BCM_PAGE_SIZE,
523 &bp->ctx_blk_mapping[i]);
524 if (bp->ctx_blk[i] == NULL)
525 goto alloc_mem_err;
526 }
527 }
528 return 0;
529
530 alloc_mem_err:
531 bnx2_free_mem(bp);
532 return -ENOMEM;
533 }
534
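/* Mirror the resolved link state (speed, duplex, autoneg result)
 * into shared memory so the firmware can see it.
 */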
535 static void
536 bnx2_report_fw_link(struct bnx2 *bp)
537 {
538 u32 fw_link_status = 0;
539
540 if (bp->link_up) {
541 u32 bmsr;
542
543 switch (bp->line_speed) {
544 case SPEED_10:
545 if (bp->duplex == DUPLEX_HALF)
546 fw_link_status = BNX2_LINK_STATUS_10HALF;
547 else
548 fw_link_status = BNX2_LINK_STATUS_10FULL;
549 break;
550 case SPEED_100:
551 if (bp->duplex == DUPLEX_HALF)
552 fw_link_status = BNX2_LINK_STATUS_100HALF;
553 else
554 fw_link_status = BNX2_LINK_STATUS_100FULL;
555 break;
556 case SPEED_1000:
557 if (bp->duplex == DUPLEX_HALF)
558 fw_link_status = BNX2_LINK_STATUS_1000HALF;
559 else
560 fw_link_status = BNX2_LINK_STATUS_1000FULL;
561 break;
562 case SPEED_2500:
563 if (bp->duplex == DUPLEX_HALF)
564 fw_link_status = BNX2_LINK_STATUS_2500HALF;
565 else
566 fw_link_status = BNX2_LINK_STATUS_2500FULL;
567 break;
568 }
569
570 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
571
572 if (bp->autoneg) {
573 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
574
575 bnx2_read_phy(bp, MII_BMSR, &bmsr);
576 bnx2_read_phy(bp, MII_BMSR, &bmsr);
577
578 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
579 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
580 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
581 else
582 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
583 }
584 }
585 else
586 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
587
588 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
589 }
590
591 static void
592 bnx2_report_link(struct bnx2 *bp)
593 {
594 if (bp->link_up) {
595 netif_carrier_on(bp->dev);
596 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
597
598 printk("%d Mbps ", bp->line_speed);
599
600 if (bp->duplex == DUPLEX_FULL)
601 printk("full duplex");
602 else
603 printk("half duplex");
604
605 if (bp->flow_ctrl) {
606 if (bp->flow_ctrl & FLOW_CTRL_RX) {
607 printk(", receive ");
608 if (bp->flow_ctrl & FLOW_CTRL_TX)
609 printk("& transmit ");
610 }
611 else {
612 printk(", transmit ");
613 }
614 printk("flow control ON");
615 }
616 printk("\n");
617 }
618 else {
619 netif_carrier_off(bp->dev);
620 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
621 }
622
623 bnx2_report_fw_link(bp);
624 }
625
626 static void
627 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
628 {
629 u32 local_adv, remote_adv;
630
631 bp->flow_ctrl = 0;
632 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
633 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
634
635 if (bp->duplex == DUPLEX_FULL) {
636 bp->flow_ctrl = bp->req_flow_ctrl;
637 }
638 return;
639 }
640
641 if (bp->duplex != DUPLEX_FULL) {
642 return;
643 }
644
645 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
646 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
647 u32 val;
648
649 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
650 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
651 bp->flow_ctrl |= FLOW_CTRL_TX;
652 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
653 bp->flow_ctrl |= FLOW_CTRL_RX;
654 return;
655 }
656
657 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
658 bnx2_read_phy(bp, MII_LPA, &remote_adv);
659
660 if (bp->phy_flags & PHY_SERDES_FLAG) {
661 u32 new_local_adv = 0;
662 u32 new_remote_adv = 0;
663
664 if (local_adv & ADVERTISE_1000XPAUSE)
665 new_local_adv |= ADVERTISE_PAUSE_CAP;
666 if (local_adv & ADVERTISE_1000XPSE_ASYM)
667 new_local_adv |= ADVERTISE_PAUSE_ASYM;
668 if (remote_adv & ADVERTISE_1000XPAUSE)
669 new_remote_adv |= ADVERTISE_PAUSE_CAP;
670 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
671 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
672
673 local_adv = new_local_adv;
674 remote_adv = new_remote_adv;
675 }
676
677 /* See Table 28B-3 of 802.3ab-1999 spec. */
678 if (local_adv & ADVERTISE_PAUSE_CAP) {
679 if(local_adv & ADVERTISE_PAUSE_ASYM) {
680 if (remote_adv & ADVERTISE_PAUSE_CAP) {
681 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
682 }
683 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
684 bp->flow_ctrl = FLOW_CTRL_RX;
685 }
686 }
687 else {
688 if (remote_adv & ADVERTISE_PAUSE_CAP) {
689 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
690 }
691 }
692 }
693 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
694 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
695 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
696
697 bp->flow_ctrl = FLOW_CTRL_TX;
698 }
699 }
700 }
701
702 static int
703 bnx2_5708s_linkup(struct bnx2 *bp)
704 {
705 u32 val;
706
707 bp->link_up = 1;
708 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
709 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
710 case BCM5708S_1000X_STAT1_SPEED_10:
711 bp->line_speed = SPEED_10;
712 break;
713 case BCM5708S_1000X_STAT1_SPEED_100:
714 bp->line_speed = SPEED_100;
715 break;
716 case BCM5708S_1000X_STAT1_SPEED_1G:
717 bp->line_speed = SPEED_1000;
718 break;
719 case BCM5708S_1000X_STAT1_SPEED_2G5:
720 bp->line_speed = SPEED_2500;
721 break;
722 }
723 if (val & BCM5708S_1000X_STAT1_FD)
724 bp->duplex = DUPLEX_FULL;
725 else
726 bp->duplex = DUPLEX_HALF;
727
728 return 0;
729 }
730
731 static int
732 bnx2_5706s_linkup(struct bnx2 *bp)
733 {
734 u32 bmcr, local_adv, remote_adv, common;
735
736 bp->link_up = 1;
737 bp->line_speed = SPEED_1000;
738
739 bnx2_read_phy(bp, MII_BMCR, &bmcr);
740 if (bmcr & BMCR_FULLDPLX) {
741 bp->duplex = DUPLEX_FULL;
742 }
743 else {
744 bp->duplex = DUPLEX_HALF;
745 }
746
747 if (!(bmcr & BMCR_ANENABLE)) {
748 return 0;
749 }
750
751 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
752 bnx2_read_phy(bp, MII_LPA, &remote_adv);
753
754 common = local_adv & remote_adv;
755 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
756
757 if (common & ADVERTISE_1000XFULL) {
758 bp->duplex = DUPLEX_FULL;
759 }
760 else {
761 bp->duplex = DUPLEX_HALF;
762 }
763 }
764
765 return 0;
766 }
767
768 static int
769 bnx2_copper_linkup(struct bnx2 *bp)
770 {
771 u32 bmcr;
772
773 bnx2_read_phy(bp, MII_BMCR, &bmcr);
774 if (bmcr & BMCR_ANENABLE) {
775 u32 local_adv, remote_adv, common;
776
777 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
778 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
779
780 common = local_adv & (remote_adv >> 2);
781 if (common & ADVERTISE_1000FULL) {
782 bp->line_speed = SPEED_1000;
783 bp->duplex = DUPLEX_FULL;
784 }
785 else if (common & ADVERTISE_1000HALF) {
786 bp->line_speed = SPEED_1000;
787 bp->duplex = DUPLEX_HALF;
788 }
789 else {
790 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
791 bnx2_read_phy(bp, MII_LPA, &remote_adv);
792
793 common = local_adv & remote_adv;
794 if (common & ADVERTISE_100FULL) {
795 bp->line_speed = SPEED_100;
796 bp->duplex = DUPLEX_FULL;
797 }
798 else if (common & ADVERTISE_100HALF) {
799 bp->line_speed = SPEED_100;
800 bp->duplex = DUPLEX_HALF;
801 }
802 else if (common & ADVERTISE_10FULL) {
803 bp->line_speed = SPEED_10;
804 bp->duplex = DUPLEX_FULL;
805 }
806 else if (common & ADVERTISE_10HALF) {
807 bp->line_speed = SPEED_10;
808 bp->duplex = DUPLEX_HALF;
809 }
810 else {
811 bp->line_speed = 0;
812 bp->link_up = 0;
813 }
814 }
815 }
816 else {
817 if (bmcr & BMCR_SPEED100) {
818 bp->line_speed = SPEED_100;
819 }
820 else {
821 bp->line_speed = SPEED_10;
822 }
823 if (bmcr & BMCR_FULLDPLX) {
824 bp->duplex = DUPLEX_FULL;
825 }
826 else {
827 bp->duplex = DUPLEX_HALF;
828 }
829 }
830
831 return 0;
832 }
833
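/* Program the EMAC for the resolved link: port mode based on line
 * speed, half/full duplex, and the RX/TX pause enables, then ack
 * the link change interrupt.
 */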
834 static int
835 bnx2_set_mac_link(struct bnx2 *bp)
836 {
837 u32 val;
838
839 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
840 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
841 (bp->duplex == DUPLEX_HALF)) {
842 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
843 }
844
845 /* Configure the EMAC mode register. */
846 val = REG_RD(bp, BNX2_EMAC_MODE);
847
848 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
849 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
850 BNX2_EMAC_MODE_25G_MODE);
851
852 if (bp->link_up) {
853 switch (bp->line_speed) {
854 case SPEED_10:
855 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
856 val |= BNX2_EMAC_MODE_PORT_MII_10M;
857 break;
858 }
859 /* fall through */
860 case SPEED_100:
861 val |= BNX2_EMAC_MODE_PORT_MII;
862 break;
863 case SPEED_2500:
864 val |= BNX2_EMAC_MODE_25G_MODE;
865 /* fall through */
866 case SPEED_1000:
867 val |= BNX2_EMAC_MODE_PORT_GMII;
868 break;
869 }
870 }
871 else {
872 val |= BNX2_EMAC_MODE_PORT_GMII;
873 }
874
875 /* Set the MAC to operate in the appropriate duplex mode. */
876 if (bp->duplex == DUPLEX_HALF)
877 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
878 REG_WR(bp, BNX2_EMAC_MODE, val);
879
880 /* Enable/disable rx PAUSE. */
881 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
882
883 if (bp->flow_ctrl & FLOW_CTRL_RX)
884 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
885 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
886
887 /* Enable/disable tx PAUSE. */
888 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
889 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
890
891 if (bp->flow_ctrl & FLOW_CTRL_TX)
892 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
893 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
894
895 /* Acknowledge the interrupt. */
896 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
897
898 return 0;
899 }
900
901 static int
902 bnx2_set_link(struct bnx2 *bp)
903 {
904 u32 bmsr;
905 u8 link_up;
906
907 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
908 bp->link_up = 1;
909 return 0;
910 }
911
912 link_up = bp->link_up;
913
914 bnx2_read_phy(bp, MII_BMSR, &bmsr);
915 bnx2_read_phy(bp, MII_BMSR, &bmsr);
916
917 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
918 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
919 u32 val;
920
921 val = REG_RD(bp, BNX2_EMAC_STATUS);
922 if (val & BNX2_EMAC_STATUS_LINK)
923 bmsr |= BMSR_LSTATUS;
924 else
925 bmsr &= ~BMSR_LSTATUS;
926 }
927
928 if (bmsr & BMSR_LSTATUS) {
929 bp->link_up = 1;
930
931 if (bp->phy_flags & PHY_SERDES_FLAG) {
932 if (CHIP_NUM(bp) == CHIP_NUM_5706)
933 bnx2_5706s_linkup(bp);
934 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
935 bnx2_5708s_linkup(bp);
936 }
937 else {
938 bnx2_copper_linkup(bp);
939 }
940 bnx2_resolve_flow_ctrl(bp);
941 }
942 else {
943 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
944 (bp->autoneg & AUTONEG_SPEED)) {
945
946 u32 bmcr;
947
948 bnx2_read_phy(bp, MII_BMCR, &bmcr);
949 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
950 if (!(bmcr & BMCR_ANENABLE)) {
951 bnx2_write_phy(bp, MII_BMCR, bmcr |
952 BMCR_ANENABLE);
953 }
954 }
955 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
956 bp->link_up = 0;
957 }
958
959 if (bp->link_up != link_up) {
960 bnx2_report_link(bp);
961 }
962
963 bnx2_set_mac_link(bp);
964
965 return 0;
966 }
967
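/* Soft-reset the PHY through BMCR_RESET and poll until the bit
 * self-clears, giving up after PHY_RESET_MAX_WAIT iterations.
 */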
968 static int
969 bnx2_reset_phy(struct bnx2 *bp)
970 {
971 int i;
972 u32 reg;
973
974 bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
975
976 #define PHY_RESET_MAX_WAIT 100
977 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
978 udelay(10);
979
980 bnx2_read_phy(bp, MII_BMCR, &reg);
981 if (!(reg & BMCR_RESET)) {
982 udelay(20);
983 break;
984 }
985 }
986 if (i == PHY_RESET_MAX_WAIT) {
987 return -EBUSY;
988 }
989 return 0;
990 }
991
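/* Translate the requested flow control settings into pause
 * advertisement bits: 1000X bits for SerDes PHYs, standard MII
 * bits for copper.
 */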
992 static u32
993 bnx2_phy_get_pause_adv(struct bnx2 *bp)
994 {
995 u32 adv = 0;
996
997 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
998 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
999
1000 if (bp->phy_flags & PHY_SERDES_FLAG) {
1001 adv = ADVERTISE_1000XPAUSE;
1002 }
1003 else {
1004 adv = ADVERTISE_PAUSE_CAP;
1005 }
1006 }
1007 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1008 if (bp->phy_flags & PHY_SERDES_FLAG) {
1009 adv = ADVERTISE_1000XPSE_ASYM;
1010 }
1011 else {
1012 adv = ADVERTISE_PAUSE_ASYM;
1013 }
1014 }
1015 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1016 if (bp->phy_flags & PHY_SERDES_FLAG) {
1017 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1018 }
1019 else {
1020 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1021 }
1022 }
1023 return adv;
1024 }
1025
1026 static int
1027 bnx2_setup_serdes_phy(struct bnx2 *bp)
1028 {
1029 u32 adv, bmcr, up1;
1030 u32 new_adv = 0;
1031
1032 if (!(bp->autoneg & AUTONEG_SPEED)) {
1033 u32 new_bmcr;
1034 int force_link_down = 0;
1035
1036 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
1037 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1038
1039 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1040 new_bmcr = bmcr & ~(BMCR_ANENABLE | BCM5708S_BMCR_FORCE_2500);
1041 new_bmcr |= BMCR_SPEED1000;
1042 if (bp->req_line_speed == SPEED_2500) {
1043 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1044 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1045 if (!(up1 & BCM5708S_UP1_2G5)) {
1046 up1 |= BCM5708S_UP1_2G5;
1047 bnx2_write_phy(bp, BCM5708S_UP1, up1);
1048 force_link_down = 1;
1049 }
1050 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1051 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1052 if (up1 & BCM5708S_UP1_2G5) {
1053 up1 &= ~BCM5708S_UP1_2G5;
1054 bnx2_write_phy(bp, BCM5708S_UP1, up1);
1055 force_link_down = 1;
1056 }
1057 }
1058
1059 if (bp->req_duplex == DUPLEX_FULL) {
1060 adv |= ADVERTISE_1000XFULL;
1061 new_bmcr |= BMCR_FULLDPLX;
1062 }
1063 else {
1064 adv |= ADVERTISE_1000XHALF;
1065 new_bmcr &= ~BMCR_FULLDPLX;
1066 }
1067 if ((new_bmcr != bmcr) || (force_link_down)) {
1068 /* Force a link down visible on the other side */
1069 if (bp->link_up) {
1070 bnx2_write_phy(bp, MII_ADVERTISE, adv &
1071 ~(ADVERTISE_1000XFULL |
1072 ADVERTISE_1000XHALF));
1073 bnx2_write_phy(bp, MII_BMCR, bmcr |
1074 BMCR_ANRESTART | BMCR_ANENABLE);
1075
1076 bp->link_up = 0;
1077 netif_carrier_off(bp->dev);
1078 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1079 bnx2_report_link(bp);
1080 }
1081 bnx2_write_phy(bp, MII_ADVERTISE, adv);
1082 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1083 }
1084 return 0;
1085 }
1086
1087 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1088 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1089 up1 |= BCM5708S_UP1_2G5;
1090 bnx2_write_phy(bp, BCM5708S_UP1, up1);
1091 }
1092
1093 if (bp->advertising & ADVERTISED_1000baseT_Full)
1094 new_adv |= ADVERTISE_1000XFULL;
1095
1096 new_adv |= bnx2_phy_get_pause_adv(bp);
1097
1098 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
1099 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1100
1101 bp->serdes_an_pending = 0;
1102 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1103 /* Force a link down visible on the other side */
1104 if (bp->link_up) {
1105 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
1106 spin_unlock_bh(&bp->phy_lock);
1107 msleep(20);
1108 spin_lock_bh(&bp->phy_lock);
1109 }
1110
1111 bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
1112 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
1113 BMCR_ANENABLE);
1114 		/* Speed up link-up time when the link partner
1115 		 * does not autonegotiate, which is very common
1116 		 * in blade servers. Some blade servers use
1117 		 * IPMI for keyboard input and it's important
1118 		 * to minimize link disruptions. Autonegotiation
1119 		 * involves exchanging base pages plus 3 next pages
1120 		 * and normally completes in about 120 msec.
1121 		 */
1122 bp->current_interval = SERDES_AN_TIMEOUT;
1123 bp->serdes_an_pending = 1;
1124 mod_timer(&bp->timer, jiffies + bp->current_interval);
1125 }
1126
1127 return 0;
1128 }
1129
1130 #define ETHTOOL_ALL_FIBRE_SPEED \
1131 (ADVERTISED_1000baseT_Full)
1132
1133 #define ETHTOOL_ALL_COPPER_SPEED \
1134 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1135 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1136 ADVERTISED_1000baseT_Full)
1137
1138 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1139 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1140
1141 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1142
1143 static int
1144 bnx2_setup_copper_phy(struct bnx2 *bp)
1145 {
1146 u32 bmcr;
1147 u32 new_bmcr;
1148
1149 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1150
1151 if (bp->autoneg & AUTONEG_SPEED) {
1152 u32 adv_reg, adv1000_reg;
1153 u32 new_adv_reg = 0;
1154 u32 new_adv1000_reg = 0;
1155
1156 bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
1157 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1158 ADVERTISE_PAUSE_ASYM);
1159
1160 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1161 adv1000_reg &= PHY_ALL_1000_SPEED;
1162
1163 if (bp->advertising & ADVERTISED_10baseT_Half)
1164 new_adv_reg |= ADVERTISE_10HALF;
1165 if (bp->advertising & ADVERTISED_10baseT_Full)
1166 new_adv_reg |= ADVERTISE_10FULL;
1167 if (bp->advertising & ADVERTISED_100baseT_Half)
1168 new_adv_reg |= ADVERTISE_100HALF;
1169 if (bp->advertising & ADVERTISED_100baseT_Full)
1170 new_adv_reg |= ADVERTISE_100FULL;
1171 if (bp->advertising & ADVERTISED_1000baseT_Full)
1172 new_adv1000_reg |= ADVERTISE_1000FULL;
1173
1174 new_adv_reg |= ADVERTISE_CSMA;
1175
1176 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1177
1178 if ((adv1000_reg != new_adv1000_reg) ||
1179 (adv_reg != new_adv_reg) ||
1180 ((bmcr & BMCR_ANENABLE) == 0)) {
1181
1182 bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
1183 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1184 bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
1185 BMCR_ANENABLE);
1186 }
1187 else if (bp->link_up) {
1188 /* Flow ctrl may have changed from auto to forced */
1189 /* or vice-versa. */
1190
1191 bnx2_resolve_flow_ctrl(bp);
1192 bnx2_set_mac_link(bp);
1193 }
1194 return 0;
1195 }
1196
1197 new_bmcr = 0;
1198 if (bp->req_line_speed == SPEED_100) {
1199 new_bmcr |= BMCR_SPEED100;
1200 }
1201 if (bp->req_duplex == DUPLEX_FULL) {
1202 new_bmcr |= BMCR_FULLDPLX;
1203 }
1204 if (new_bmcr != bmcr) {
1205 u32 bmsr;
1206
1207 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1208 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1209
1210 if (bmsr & BMSR_LSTATUS) {
1211 /* Force link down */
1212 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
1213 spin_unlock_bh(&bp->phy_lock);
1214 msleep(50);
1215 spin_lock_bh(&bp->phy_lock);
1216
1217 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1218 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1219 }
1220
1221 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1222
1223 		/* Normally, the new speed is set up after the link has
1224 		 * gone down and up again. In some cases, the link will not
1225 		 * go down, so we need to set up the new speed here.
1226 		 */
1227 if (bmsr & BMSR_LSTATUS) {
1228 bp->line_speed = bp->req_line_speed;
1229 bp->duplex = bp->req_duplex;
1230 bnx2_resolve_flow_ctrl(bp);
1231 bnx2_set_mac_link(bp);
1232 }
1233 }
1234 return 0;
1235 }
1236
1237 static int
1238 bnx2_setup_phy(struct bnx2 *bp)
1239 {
1240 if (bp->loopback == MAC_LOOPBACK)
1241 return 0;
1242
1243 if (bp->phy_flags & PHY_SERDES_FLAG) {
1244 return (bnx2_setup_serdes_phy(bp));
1245 }
1246 else {
1247 return (bnx2_setup_copper_phy(bp));
1248 }
1249 }
1250
1251 static int
1252 bnx2_init_5708s_phy(struct bnx2 *bp)
1253 {
1254 u32 val;
1255
1256 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1257 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1258 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1259
1260 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1261 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1262 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1263
1264 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1265 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1266 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1267
1268 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1269 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1270 val |= BCM5708S_UP1_2G5;
1271 bnx2_write_phy(bp, BCM5708S_UP1, val);
1272 }
1273
1274 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1275 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1276 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1277 /* increase tx signal amplitude */
1278 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1279 BCM5708S_BLK_ADDR_TX_MISC);
1280 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1281 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1282 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1283 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1284 }
1285
1286 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
1287 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1288
1289 if (val) {
1290 u32 is_backplane;
1291
1292 is_backplane = REG_RD_IND(bp, bp->shmem_base +
1293 BNX2_SHARED_HW_CFG_CONFIG);
1294 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1295 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1296 BCM5708S_BLK_ADDR_TX_MISC);
1297 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1298 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1299 BCM5708S_BLK_ADDR_DIG);
1300 }
1301 }
1302 return 0;
1303 }
1304
1305 static int
1306 bnx2_init_5706s_phy(struct bnx2 *bp)
1307 {
1308 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1309
1310 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1311 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
1312
1313 if (bp->dev->mtu > 1500) {
1314 u32 val;
1315
1316 /* Set extended packet length bit */
1317 bnx2_write_phy(bp, 0x18, 0x7);
1318 bnx2_read_phy(bp, 0x18, &val);
1319 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1320
1321 bnx2_write_phy(bp, 0x1c, 0x6c00);
1322 bnx2_read_phy(bp, 0x1c, &val);
1323 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1324 }
1325 else {
1326 u32 val;
1327
1328 bnx2_write_phy(bp, 0x18, 0x7);
1329 bnx2_read_phy(bp, 0x18, &val);
1330 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1331
1332 bnx2_write_phy(bp, 0x1c, 0x6c00);
1333 bnx2_read_phy(bp, 0x1c, &val);
1334 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1335 }
1336
1337 return 0;
1338 }
1339
1340 static int
1341 bnx2_init_copper_phy(struct bnx2 *bp)
1342 {
1343 u32 val;
1344
1345 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1346 bnx2_write_phy(bp, 0x18, 0x0c00);
1347 bnx2_write_phy(bp, 0x17, 0x000a);
1348 bnx2_write_phy(bp, 0x15, 0x310b);
1349 bnx2_write_phy(bp, 0x17, 0x201f);
1350 bnx2_write_phy(bp, 0x15, 0x9506);
1351 bnx2_write_phy(bp, 0x17, 0x401f);
1352 bnx2_write_phy(bp, 0x15, 0x14e2);
1353 bnx2_write_phy(bp, 0x18, 0x0400);
1354 }
1355
1356 if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
1357 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
1358 MII_BNX2_DSP_EXPAND_REG | 0x8);
1359 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1360 val &= ~(1 << 8);
1361 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
1362 }
1363
1364 if (bp->dev->mtu > 1500) {
1365 /* Set extended packet length bit */
1366 bnx2_write_phy(bp, 0x18, 0x7);
1367 bnx2_read_phy(bp, 0x18, &val);
1368 bnx2_write_phy(bp, 0x18, val | 0x4000);
1369
1370 bnx2_read_phy(bp, 0x10, &val);
1371 bnx2_write_phy(bp, 0x10, val | 0x1);
1372 }
1373 else {
1374 bnx2_write_phy(bp, 0x18, 0x7);
1375 bnx2_read_phy(bp, 0x18, &val);
1376 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1377
1378 bnx2_read_phy(bp, 0x10, &val);
1379 bnx2_write_phy(bp, 0x10, val & ~0x1);
1380 }
1381
1382 /* ethernet@wirespeed */
1383 bnx2_write_phy(bp, 0x18, 0x7007);
1384 bnx2_read_phy(bp, 0x18, &val);
1385 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1386 return 0;
1387 }
1388
1389
1390 static int
1391 bnx2_init_phy(struct bnx2 *bp)
1392 {
1393 u32 val;
1394 int rc = 0;
1395
1396 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1397 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1398
1399 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1400
1401 bnx2_reset_phy(bp);
1402
1403 bnx2_read_phy(bp, MII_PHYSID1, &val);
1404 bp->phy_id = val << 16;
1405 bnx2_read_phy(bp, MII_PHYSID2, &val);
1406 bp->phy_id |= val & 0xffff;
1407
1408 if (bp->phy_flags & PHY_SERDES_FLAG) {
1409 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1410 rc = bnx2_init_5706s_phy(bp);
1411 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1412 rc = bnx2_init_5708s_phy(bp);
1413 }
1414 else {
1415 rc = bnx2_init_copper_phy(bp);
1416 }
1417
1418 bnx2_setup_phy(bp);
1419
1420 return rc;
1421 }
1422
1423 static int
1424 bnx2_set_mac_loopback(struct bnx2 *bp)
1425 {
1426 u32 mac_mode;
1427
1428 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1429 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1430 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1431 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1432 bp->link_up = 1;
1433 return 0;
1434 }
1435
1436 static int bnx2_test_link(struct bnx2 *);
1437
1438 static int
1439 bnx2_set_phy_loopback(struct bnx2 *bp)
1440 {
1441 u32 mac_mode;
1442 int rc, i;
1443
1444 spin_lock_bh(&bp->phy_lock);
1445 rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
1446 BMCR_SPEED1000);
1447 spin_unlock_bh(&bp->phy_lock);
1448 if (rc)
1449 return rc;
1450
1451 for (i = 0; i < 10; i++) {
1452 if (bnx2_test_link(bp) == 0)
1453 break;
1454 msleep(100);
1455 }
1456
1457 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1458 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1459 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1460 BNX2_EMAC_MODE_25G_MODE);
1461
1462 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1463 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1464 bp->link_up = 1;
1465 return 0;
1466 }
1467
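/* Post a message to the bootcode through the driver mailbox in
 * shared memory and wait for the firmware to ack the sequence
 * number.  On timeout, a FW_TIMEOUT code is written back so the
 * firmware knows the handshake failed.
 */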
1468 static int
1469 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
1470 {
1471 int i;
1472 u32 val;
1473
1474 bp->fw_wr_seq++;
1475 msg_data |= bp->fw_wr_seq;
1476
1477 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1478
1479 /* wait for an acknowledgement. */
1480 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
1481 msleep(10);
1482
1483 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
1484
1485 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
1486 break;
1487 }
1488 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
1489 return 0;
1490
1491 /* If we timed out, inform the firmware that this is the case. */
1492 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
1493 if (!silent)
1494 printk(KERN_ERR PFX "fw sync timeout, reset code = "
1495 "%x\n", msg_data);
1496
1497 msg_data &= ~BNX2_DRV_MSG_CODE;
1498 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
1499
1500 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1501
1502 return -EBUSY;
1503 }
1504
1505 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
1506 return -EIO;
1507
1508 return 0;
1509 }
1510
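/* Point the 5709 context engine at the host context pages by
 * loading one page table entry per page and polling until the
 * hardware accepts each WRITE_REQ.
 */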
1511 static int
1512 bnx2_init_5709_context(struct bnx2 *bp)
1513 {
1514 int i, ret = 0;
1515 u32 val;
1516
1517 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
1518 val |= (BCM_PAGE_BITS - 8) << 16;
1519 REG_WR(bp, BNX2_CTX_COMMAND, val);
1520 for (i = 0; i < bp->ctx_pages; i++) {
1521 int j;
1522
1523 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
1524 (bp->ctx_blk_mapping[i] & 0xffffffff) |
1525 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
1526 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
1527 (u64) bp->ctx_blk_mapping[i] >> 32);
1528 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
1529 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
1530 for (j = 0; j < 10; j++) {
1531
1532 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
1533 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
1534 break;
1535 udelay(5);
1536 }
1537 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
1538 ret = -EBUSY;
1539 break;
1540 }
1541 }
1542 return ret;
1543 }
1544
1545 static void
1546 bnx2_init_context(struct bnx2 *bp)
1547 {
1548 u32 vcid;
1549
1550 vcid = 96;
1551 while (vcid) {
1552 u32 vcid_addr, pcid_addr, offset;
1553
1554 vcid--;
1555
1556 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1557 u32 new_vcid;
1558
1559 vcid_addr = GET_PCID_ADDR(vcid);
1560 if (vcid & 0x8) {
1561 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1562 }
1563 else {
1564 new_vcid = vcid;
1565 }
1566 pcid_addr = GET_PCID_ADDR(new_vcid);
1567 }
1568 else {
1569 vcid_addr = GET_CID_ADDR(vcid);
1570 pcid_addr = vcid_addr;
1571 }
1572
1573 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1574 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1575
1576 /* Zero out the context. */
1577 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1578 CTX_WR(bp, 0x00, offset, 0);
1579 }
1580
1581 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1582 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1583 }
1584 }
1585
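/* Work around bad RX buffer memory: allocate every free mbuf from
 * the pool, remember the good ones (bit 9 clear), and free only
 * those back, leaving the bad blocks permanently allocated.
 */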
1586 static int
1587 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1588 {
1589 u16 *good_mbuf;
1590 u32 good_mbuf_cnt;
1591 u32 val;
1592
1593 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1594 if (good_mbuf == NULL) {
1595 printk(KERN_ERR PFX "Failed to allocate memory in "
1596 "bnx2_alloc_bad_rbuf\n");
1597 return -ENOMEM;
1598 }
1599
1600 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1601 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1602
1603 good_mbuf_cnt = 0;
1604
1605 /* Allocate a bunch of mbufs and save the good ones in an array. */
1606 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1607 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1608 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1609
1610 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1611
1612 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1613
1614 /* The addresses with Bit 9 set are bad memory blocks. */
1615 if (!(val & (1 << 9))) {
1616 good_mbuf[good_mbuf_cnt] = (u16) val;
1617 good_mbuf_cnt++;
1618 }
1619
1620 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1621 }
1622
1623 	/* Free the good ones back to the mbuf pool, thus discarding
1624 	 * all the bad ones. */
1625 while (good_mbuf_cnt) {
1626 good_mbuf_cnt--;
1627
1628 val = good_mbuf[good_mbuf_cnt];
1629 val = (val << 9) | val | 1;
1630
1631 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
1632 }
1633 kfree(good_mbuf);
1634 return 0;
1635 }
1636
1637 static void
1638 bnx2_set_mac_addr(struct bnx2 *bp)
1639 {
1640 u32 val;
1641 u8 *mac_addr = bp->dev->dev_addr;
1642
1643 val = (mac_addr[0] << 8) | mac_addr[1];
1644
1645 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1646
1647 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1648 (mac_addr[4] << 8) | mac_addr[5];
1649
1650 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1651 }
1652
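/* Allocate and DMA-map a fresh receive skb for the given ring
 * index, aligning the data pointer and filling in the rx_bd
 * address fields.
 */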
1653 static inline int
1654 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1655 {
1656 struct sk_buff *skb;
1657 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1658 dma_addr_t mapping;
1659 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
1660 unsigned long align;
1661
1662 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1663 if (skb == NULL) {
1664 return -ENOMEM;
1665 }
1666
1667 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
1668 skb_reserve(skb, BNX2_RX_ALIGN - align);
1669
1670 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1671 PCI_DMA_FROMDEVICE);
1672
1673 rx_buf->skb = skb;
1674 pci_unmap_addr_set(rx_buf, mapping, mapping);
1675
1676 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
1677 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
1678
1679 bp->rx_prod_bseq += bp->rx_buf_use_size;
1680
1681 return 0;
1682 }
1683
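/* Handle a link attention: compare the attention bit with its ack
 * bit, update the ack through the set/clear command registers, and
 * re-resolve the link.
 */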
1684 static void
1685 bnx2_phy_int(struct bnx2 *bp)
1686 {
1687 u32 new_link_state, old_link_state;
1688
1689 new_link_state = bp->status_blk->status_attn_bits &
1690 STATUS_ATTN_BITS_LINK_STATE;
1691 old_link_state = bp->status_blk->status_attn_bits_ack &
1692 STATUS_ATTN_BITS_LINK_STATE;
1693 if (new_link_state != old_link_state) {
1694 if (new_link_state) {
1695 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1696 STATUS_ATTN_BITS_LINK_STATE);
1697 }
1698 else {
1699 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1700 STATUS_ATTN_BITS_LINK_STATE);
1701 }
1702 bnx2_set_link(bp);
1703 }
1704 }
1705
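/* Reclaim completed TX descriptors up to the hardware consumer
 * index, unmapping and freeing the skbs, and wake the queue if it
 * was stopped and enough descriptors are free again.
 */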
1706 static void
1707 bnx2_tx_int(struct bnx2 *bp)
1708 {
1709 struct status_block *sblk = bp->status_blk;
1710 u16 hw_cons, sw_cons, sw_ring_cons;
1711 int tx_free_bd = 0;
1712
1713 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
1714 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1715 hw_cons++;
1716 }
1717 sw_cons = bp->tx_cons;
1718
1719 while (sw_cons != hw_cons) {
1720 struct sw_bd *tx_buf;
1721 struct sk_buff *skb;
1722 int i, last;
1723
1724 sw_ring_cons = TX_RING_IDX(sw_cons);
1725
1726 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
1727 skb = tx_buf->skb;
1728
1729 /* partial BD completions possible with TSO packets */
1730 if (skb_is_gso(skb)) {
1731 u16 last_idx, last_ring_idx;
1732
1733 last_idx = sw_cons +
1734 skb_shinfo(skb)->nr_frags + 1;
1735 last_ring_idx = sw_ring_cons +
1736 skb_shinfo(skb)->nr_frags + 1;
1737 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
1738 last_idx++;
1739 }
1740 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
1741 break;
1742 }
1743 }
1744
1745 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
1746 skb_headlen(skb), PCI_DMA_TODEVICE);
1747
1748 tx_buf->skb = NULL;
1749 last = skb_shinfo(skb)->nr_frags;
1750
1751 for (i = 0; i < last; i++) {
1752 sw_cons = NEXT_TX_BD(sw_cons);
1753
1754 pci_unmap_page(bp->pdev,
1755 pci_unmap_addr(
1756 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
1757 mapping),
1758 skb_shinfo(skb)->frags[i].size,
1759 PCI_DMA_TODEVICE);
1760 }
1761
1762 sw_cons = NEXT_TX_BD(sw_cons);
1763
1764 tx_free_bd += last + 1;
1765
1766 dev_kfree_skb(skb);
1767
1768 hw_cons = bp->hw_tx_cons =
1769 sblk->status_tx_quick_consumer_index0;
1770
1771 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1772 hw_cons++;
1773 }
1774 }
1775
1776 bp->tx_cons = sw_cons;
1777 /* Need to make the tx_cons update visible to bnx2_start_xmit()
1778 * before checking for netif_queue_stopped(). Without the
1779 * memory barrier, there is a small possibility that bnx2_start_xmit()
1780 * will miss it and cause the queue to be stopped forever.
1781 */
1782 smp_mb();
1783
1784 if (unlikely(netif_queue_stopped(bp->dev)) &&
1785 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
1786 netif_tx_lock(bp->dev);
1787 if ((netif_queue_stopped(bp->dev)) &&
1788 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
1789 netif_wake_queue(bp->dev);
1790 netif_tx_unlock(bp->dev);
1791 }
1792 }
1793
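/* Recycle an rx skb from the consumer slot to the producer slot,
 * moving the DMA mapping and BD address with it; used when a
 * replacement skb cannot be allocated or the packet was copied.
 */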
1794 static inline void
1795 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
1796 u16 cons, u16 prod)
1797 {
1798 struct sw_bd *cons_rx_buf, *prod_rx_buf;
1799 struct rx_bd *cons_bd, *prod_bd;
1800
1801 cons_rx_buf = &bp->rx_buf_ring[cons];
1802 prod_rx_buf = &bp->rx_buf_ring[prod];
1803
1804 pci_dma_sync_single_for_device(bp->pdev,
1805 pci_unmap_addr(cons_rx_buf, mapping),
1806 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1807
1808 bp->rx_prod_bseq += bp->rx_buf_use_size;
1809
1810 prod_rx_buf->skb = skb;
1811
1812 if (cons == prod)
1813 return;
1814
1815 pci_unmap_addr_set(prod_rx_buf, mapping,
1816 pci_unmap_addr(cons_rx_buf, mapping));
1817
1818 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
1819 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1820 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
1821 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
1822 }
1823
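/* Receive up to @budget packets: check the l2_fhdr status for
 * errors, copy small packets when the MTU is large, replenish or
 * recycle ring buffers, and pass good skbs up the stack.
 */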
1824 static int
1825 bnx2_rx_int(struct bnx2 *bp, int budget)
1826 {
1827 struct status_block *sblk = bp->status_blk;
1828 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
1829 struct l2_fhdr *rx_hdr;
1830 int rx_pkt = 0;
1831
1832 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
1833 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
1834 hw_cons++;
1835 }
1836 sw_cons = bp->rx_cons;
1837 sw_prod = bp->rx_prod;
1838
1839 /* Memory barrier necessary as speculative reads of the rx
1840 * buffer can be ahead of the index in the status block
1841 */
1842 rmb();
1843 while (sw_cons != hw_cons) {
1844 unsigned int len;
1845 u32 status;
1846 struct sw_bd *rx_buf;
1847 struct sk_buff *skb;
1848 dma_addr_t dma_addr;
1849
1850 sw_ring_cons = RX_RING_IDX(sw_cons);
1851 sw_ring_prod = RX_RING_IDX(sw_prod);
1852
1853 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
1854 skb = rx_buf->skb;
1855
1856 rx_buf->skb = NULL;
1857
1858 dma_addr = pci_unmap_addr(rx_buf, mapping);
1859
1860 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
1861 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1862
1863 rx_hdr = (struct l2_fhdr *) skb->data;
1864 len = rx_hdr->l2_fhdr_pkt_len - 4;
1865
1866 if ((status = rx_hdr->l2_fhdr_status) &
1867 (L2_FHDR_ERRORS_BAD_CRC |
1868 L2_FHDR_ERRORS_PHY_DECODE |
1869 L2_FHDR_ERRORS_ALIGNMENT |
1870 L2_FHDR_ERRORS_TOO_SHORT |
1871 L2_FHDR_ERRORS_GIANT_FRAME)) {
1872
1873 goto reuse_rx;
1874 }
1875
1876 /* Since we don't have a jumbo ring, copy small packets
1877 * if mtu > 1500
1878 */
1879 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
1880 struct sk_buff *new_skb;
1881
1882 new_skb = netdev_alloc_skb(bp->dev, len + 2);
1883 if (new_skb == NULL)
1884 goto reuse_rx;
1885
1886 /* aligned copy */
1887 memcpy(new_skb->data,
1888 skb->data + bp->rx_offset - 2,
1889 len + 2);
1890
1891 skb_reserve(new_skb, 2);
1892 skb_put(new_skb, len);
1893
1894 bnx2_reuse_rx_skb(bp, skb,
1895 sw_ring_cons, sw_ring_prod);
1896
1897 skb = new_skb;
1898 }
1899 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
1900 pci_unmap_single(bp->pdev, dma_addr,
1901 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1902
1903 skb_reserve(skb, bp->rx_offset);
1904 skb_put(skb, len);
1905 }
1906 else {
1907 reuse_rx:
1908 bnx2_reuse_rx_skb(bp, skb,
1909 sw_ring_cons, sw_ring_prod);
1910 goto next_rx;
1911 }
1912
1913 skb->protocol = eth_type_trans(skb, bp->dev);
1914
1915 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
1916 (ntohs(skb->protocol) != 0x8100)) {
1917
1918 dev_kfree_skb(skb);
1919 goto next_rx;
1920
1921 }
1922
1923 skb->ip_summed = CHECKSUM_NONE;
1924 if (bp->rx_csum &&
1925 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
1926 L2_FHDR_STATUS_UDP_DATAGRAM))) {
1927
1928 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
1929 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
1930 skb->ip_summed = CHECKSUM_UNNECESSARY;
1931 }
1932
1933 #ifdef BCM_VLAN
1934 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
1935 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1936 rx_hdr->l2_fhdr_vlan_tag);
1937 }
1938 else
1939 #endif
1940 netif_receive_skb(skb);
1941
1942 bp->dev->last_rx = jiffies;
1943 rx_pkt++;
1944
1945 next_rx:
1946 sw_cons = NEXT_RX_BD(sw_cons);
1947 sw_prod = NEXT_RX_BD(sw_prod);
1948
1949 		if (rx_pkt == budget)
1950 break;
1951
1952 /* Refresh hw_cons to see if there is new work */
1953 if (sw_cons == hw_cons) {
1954 hw_cons = bp->hw_rx_cons =
1955 sblk->status_rx_quick_consumer_index0;
1956 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
1957 hw_cons++;
1958 rmb();
1959 }
1960 }
1961 bp->rx_cons = sw_cons;
1962 bp->rx_prod = sw_prod;
1963
1964 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
1965
1966 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
1967
1968 mmiowb();
1969
1970 return rx_pkt;
1971
1972 }
1973
1974 /* MSI ISR - The only difference between this and the INTx ISR
1975 * is that the MSI interrupt is always serviced.
1976 */
1977 static irqreturn_t
1978 bnx2_msi(int irq, void *dev_instance)
1979 {
1980 struct net_device *dev = dev_instance;
1981 struct bnx2 *bp = netdev_priv(dev);
1982
1983 prefetch(bp->status_blk);
1984 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1985 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1986 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1987
1988 /* Return here if interrupt is disabled. */
1989 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1990 return IRQ_HANDLED;
1991
1992 netif_rx_schedule(dev);
1993
1994 return IRQ_HANDLED;
1995 }
1996
1997 static irqreturn_t
1998 bnx2_interrupt(int irq, void *dev_instance)
1999 {
2000 struct net_device *dev = dev_instance;
2001 struct bnx2 *bp = netdev_priv(dev);
2002
2003 	/* When using INTx, it is possible for the interrupt to arrive
2004 	 * at the CPU before the status block write that preceded it
2005 	 * has been posted. Reading a register will flush the status
2006 	 * block. When using MSI, the MSI message will always complete
2007 	 * after the status block write.
2008 	 */
2009 if ((bp->status_blk->status_idx == bp->last_status_idx) &&
2010 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2011 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2012 return IRQ_NONE;
2013
2014 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2015 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2016 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2017
2018 /* Return here if interrupt is shared and is disabled. */
2019 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2020 return IRQ_HANDLED;
2021
2022 netif_rx_schedule(dev);
2023
2024 return IRQ_HANDLED;
2025 }
2026
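/* Check the status block for pending work: new RX or TX
 * completions, or a change in the link attention bits.
 */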
2027 static inline int
2028 bnx2_has_work(struct bnx2 *bp)
2029 {
2030 struct status_block *sblk = bp->status_blk;
2031
2032 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2033 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2034 return 1;
2035
2036 if ((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
2037 (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
2038 return 1;
2039
2040 return 0;
2041 }
2042
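/* NAPI poll handler: service link attentions, then TX and RX
 * completions, and re-enable interrupts once no work remains.
 * Returns 1 to stay on the poll list while work is pending.
 */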
2043 static int
2044 bnx2_poll(struct net_device *dev, int *budget)
2045 {
2046 struct bnx2 *bp = netdev_priv(dev);
2047
2048 if ((bp->status_blk->status_attn_bits &
2049 STATUS_ATTN_BITS_LINK_STATE) !=
2050 (bp->status_blk->status_attn_bits_ack &
2051 STATUS_ATTN_BITS_LINK_STATE)) {
2052
2053 spin_lock(&bp->phy_lock);
2054 bnx2_phy_int(bp);
2055 spin_unlock(&bp->phy_lock);
2056
2057 /* This is needed to take care of transient status
2058 * during link changes.
2059 */
2060 REG_WR(bp, BNX2_HC_COMMAND,
2061 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2062 REG_RD(bp, BNX2_HC_COMMAND);
2063 }
2064
2065 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
2066 bnx2_tx_int(bp);
2067
2068 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
2069 int orig_budget = *budget;
2070 int work_done;
2071
2072 if (orig_budget > dev->quota)
2073 orig_budget = dev->quota;
2074
2075 work_done = bnx2_rx_int(bp, orig_budget);
2076 *budget -= work_done;
2077 dev->quota -= work_done;
2078 }
2079
2080 bp->last_status_idx = bp->status_blk->status_idx;
2081 rmb();
2082
2083 if (!bnx2_has_work(bp)) {
2084 netif_rx_complete(dev);
2085 if (likely(bp->flags & USING_MSI_FLAG)) {
2086 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2087 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2088 bp->last_status_idx);
2089 return 0;
2090 }
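/* For INTx, the new index is written twice: first with the interrupt
 * still masked, then again with the mask cleared to re-enable it.
 */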
2091 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2092 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2093 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2094 bp->last_status_idx);
2095
2096 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2097 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2098 bp->last_status_idx);
2099 return 0;
2100 }
2101
2102 return 1;
2103 }
2104
2105 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2106 * from set_multicast.
2107 */
2108 static void
2109 bnx2_set_rx_mode(struct net_device *dev)
2110 {
2111 struct bnx2 *bp = netdev_priv(dev);
2112 u32 rx_mode, sort_mode;
2113 int i;
2114
2115 spin_lock_bh(&bp->phy_lock);
2116
2117 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2118 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2119 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2120 #ifdef BCM_VLAN
2121 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
2122 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2123 #else
2124 if (!(bp->flags & ASF_ENABLE_FLAG))
2125 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2126 #endif
2127 if (dev->flags & IFF_PROMISC) {
2128 /* Promiscuous mode. */
2129 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2130 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2131 BNX2_RPM_SORT_USER0_PROM_VLAN;
2132 }
2133 else if (dev->flags & IFF_ALLMULTI) {
2134 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2135 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2136 0xffffffff);
2137 }
2138 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2139 }
2140 else {
2141 /* Accept one or more multicast addresses. */
2142 struct dev_mc_list *mclist;
2143 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2144 u32 regidx;
2145 u32 bit;
2146 u32 crc;
2147
2148 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2149
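/* The low byte of the little-endian CRC selects one of 256 filter
 * bits: bits 7-5 pick one of the 8 hash registers, bits 4-0 the bit
 * within it. Example: crc = 0x4b lands on bit 11 of mc_filter[2].
 */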
2150 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2151 i++, mclist = mclist->next) {
2152
2153 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2154 bit = crc & 0xff;
2155 regidx = (bit & 0xe0) >> 5;
2156 bit &= 0x1f;
2157 mc_filter[regidx] |= (1 << bit);
2158 }
2159
2160 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2161 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2162 mc_filter[i]);
2163 }
2164
2165 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2166 }
2167
2168 if (rx_mode != bp->rx_mode) {
2169 bp->rx_mode = rx_mode;
2170 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2171 }
2172
2173 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2174 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2175 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2176
2177 spin_unlock_bh(&bp->phy_lock);
2178 }
2179
2180 #define FW_BUF_SIZE 0x8000
2181
2182 static int
2183 bnx2_gunzip_init(struct bnx2 *bp)
2184 {
2185 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2186 goto gunzip_nomem1;
2187
2188 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2189 goto gunzip_nomem2;
2190
2191 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2192 if (bp->strm->workspace == NULL)
2193 goto gunzip_nomem3;
2194
2195 return 0;
2196
2197 gunzip_nomem3:
2198 kfree(bp->strm);
2199 bp->strm = NULL;
2200
2201 gunzip_nomem2:
2202 vfree(bp->gunzip_buf);
2203 bp->gunzip_buf = NULL;
2204
2205 gunzip_nomem1:
2206 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2207 "uncompression.\n", bp->dev->name);
2208 return -ENOMEM;
2209 }
2210
2211 static void
2212 bnx2_gunzip_end(struct bnx2 *bp)
2213 {
2214 kfree(bp->strm->workspace);
2215
2216 kfree(bp->strm);
2217 bp->strm = NULL;
2218
2219 if (bp->gunzip_buf) {
2220 vfree(bp->gunzip_buf);
2221 bp->gunzip_buf = NULL;
2222 }
2223 }
2224
2225 static int
2226 bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2227 {
2228 int n, rc;
2229
2230 /* check gzip header */
2231 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2232 return -EINVAL;
2233
2234 n = 10;
2235
2236 #define FNAME 0x8
2237 if (zbuf[3] & FNAME)
2238 while ((zbuf[n++] != 0) && (n < len));
2239
2240 bp->strm->next_in = zbuf + n;
2241 bp->strm->avail_in = len - n;
2242 bp->strm->next_out = bp->gunzip_buf;
2243 bp->strm->avail_out = FW_BUF_SIZE;
2244
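/* A negative window-bits value tells zlib to inflate a raw deflate
 * stream: the gzip header (10 bytes plus the optional file name) was
 * skipped above, there is no zlib header, and the gzip trailer is
 * not checked.
 */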
2245 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2246 if (rc != Z_OK)
2247 return rc;
2248
2249 rc = zlib_inflate(bp->strm, Z_FINISH);
2250
2251 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2252 *outbuf = bp->gunzip_buf;
2253
2254 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2255 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2256 bp->dev->name, bp->strm->msg);
2257
2258 zlib_inflateEnd(bp->strm);
2259
2260 if (rc == Z_STREAM_END)
2261 return 0;
2262
2263 return rc;
2264 }
2265
2266 static void
2267 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2268 u32 rv2p_proc)
2269 {
2270 int i;
2271 u32 val;
2272
2273
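/* Each RV2P instruction is 64 bits wide: write the two 32-bit
 * halves, then issue the address command that stores them at
 * instruction slot i / 8.
 */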
2274 for (i = 0; i < rv2p_code_len; i += 8) {
2275 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2276 rv2p_code++;
2277 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2278 rv2p_code++;
2279
2280 if (rv2p_proc == RV2P_PROC1) {
2281 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2282 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2283 }
2284 else {
2285 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2286 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2287 }
2288 }
2289
2290 /* Reset the processor; the un-stall is done later. */
2291 if (rv2p_proc == RV2P_PROC1) {
2292 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2293 }
2294 else {
2295 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2296 }
2297 }
2298
2299 static int
2300 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2301 {
2302 u32 offset;
2303 u32 val;
2304 int rc;
2305
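/* The load sequence is: halt the CPU, copy each firmware section
 * into its scratchpad window, clear any prefetched instruction,
 * point the PC at the entry address, then clear the halt bit.
 */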
2306 /* Halt the CPU. */
2307 val = REG_RD_IND(bp, cpu_reg->mode);
2308 val |= cpu_reg->mode_value_halt;
2309 REG_WR_IND(bp, cpu_reg->mode, val);
2310 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2311
2312 /* Load the Text area. */
2313 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2314 if (fw->gz_text) {
2315 u32 text_len;
2316 void *text;
2317
2318 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2319 &text_len);
2320 if (rc)
2321 return rc;
2322
2323 fw->text = text;
2324 }
2325 if (fw->gz_text) {
2326 int j;
2327
2328 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2329 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2330 }
2331 }
2332
2333 /* Load the Data area. */
2334 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2335 if (fw->data) {
2336 int j;
2337
2338 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2339 REG_WR_IND(bp, offset, fw->data[j]);
2340 }
2341 }
2342
2343 /* Load the SBSS area. */
2344 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2345 if (fw->sbss) {
2346 int j;
2347
2348 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2349 REG_WR_IND(bp, offset, fw->sbss[j]);
2350 }
2351 }
2352
2353 /* Load the BSS area. */
2354 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2355 if (fw->bss) {
2356 int j;
2357
2358 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2359 REG_WR_IND(bp, offset, fw->bss[j]);
2360 }
2361 }
2362
2363 /* Load the Read-Only area. */
2364 offset = cpu_reg->spad_base +
2365 (fw->rodata_addr - cpu_reg->mips_view_base);
2366 if (fw->rodata) {
2367 int j;
2368
2369 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2370 REG_WR_IND(bp, offset, fw->rodata[j]);
2371 }
2372 }
2373
2374 /* Clear the pre-fetch instruction. */
2375 REG_WR_IND(bp, cpu_reg->inst, 0);
2376 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2377
2378 /* Start the CPU. */
2379 val = REG_RD_IND(bp, cpu_reg->mode);
2380 val &= ~cpu_reg->mode_value_halt;
2381 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2382 REG_WR_IND(bp, cpu_reg->mode, val);
2383
2384 return 0;
2385 }
2386
2387 static int
2388 bnx2_init_cpus(struct bnx2 *bp)
2389 {
2390 struct cpu_reg cpu_reg;
2391 struct fw_info *fw;
2392 int rc = 0;
2393 void *text;
2394 u32 text_len;
2395
2396 if ((rc = bnx2_gunzip_init(bp)) != 0)
2397 return rc;
2398
2399 /* Initialize the RV2P processor. */
2400 rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
2401 &text_len);
2402 if (rc)
2403 goto init_cpu_err;
2404
2405 load_rv2p_fw(bp, text, text_len, RV2P_PROC1);
2406
2407 rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
2408 &text_len);
2409 if (rc)
2410 goto init_cpu_err;
2411
2412 load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
2413
2414 /* Initialize the RX Processor. */
2415 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2416 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2417 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2418 cpu_reg.state = BNX2_RXP_CPU_STATE;
2419 cpu_reg.state_value_clear = 0xffffff;
2420 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2421 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2422 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2423 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2424 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2425 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2426 cpu_reg.mips_view_base = 0x8000000;
2427
2428 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2429 fw = &bnx2_rxp_fw_09;
2430 else
2431 fw = &bnx2_rxp_fw_06;
2432
2433 rc = load_cpu_fw(bp, &cpu_reg, fw);
2434 if (rc)
2435 goto init_cpu_err;
2436
2437 /* Initialize the TX Processor. */
2438 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2439 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2440 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2441 cpu_reg.state = BNX2_TXP_CPU_STATE;
2442 cpu_reg.state_value_clear = 0xffffff;
2443 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2444 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2445 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2446 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2447 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2448 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2449 cpu_reg.mips_view_base = 0x8000000;
2450
2451 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2452 fw = &bnx2_txp_fw_09;
2453 else
2454 fw = &bnx2_txp_fw_06;
2455
2456 rc = load_cpu_fw(bp, &cpu_reg, fw);
2457 if (rc)
2458 goto init_cpu_err;
2459
2460 /* Initialize the TX Patch-up Processor. */
2461 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2462 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2463 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2464 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2465 cpu_reg.state_value_clear = 0xffffff;
2466 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2467 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2468 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2469 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2470 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2471 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2472 cpu_reg.mips_view_base = 0x8000000;
2473
2474 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2475 fw = &bnx2_tpat_fw_09;
2476 else
2477 fw = &bnx2_tpat_fw_06;
2478
2479 rc = load_cpu_fw(bp, &cpu_reg, fw);
2480 if (rc)
2481 goto init_cpu_err;
2482
2483 /* Initialize the Completion Processor. */
2484 cpu_reg.mode = BNX2_COM_CPU_MODE;
2485 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2486 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2487 cpu_reg.state = BNX2_COM_CPU_STATE;
2488 cpu_reg.state_value_clear = 0xffffff;
2489 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2490 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2491 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2492 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2493 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2494 cpu_reg.spad_base = BNX2_COM_SCRATCH;
2495 cpu_reg.mips_view_base = 0x8000000;
2496
2497 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2498 fw = &bnx2_com_fw_09;
2499 else
2500 fw = &bnx2_com_fw_06;
2501
2502 rc = load_cpu_fw(bp, &cpu_reg, fw);
2503 if (rc)
2504 goto init_cpu_err;
2505
2506 /* Initialize the Command Processor. */
2507 cpu_reg.mode = BNX2_CP_CPU_MODE;
2508 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
2509 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
2510 cpu_reg.state = BNX2_CP_CPU_STATE;
2511 cpu_reg.state_value_clear = 0xffffff;
2512 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
2513 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
2514 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
2515 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
2516 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
2517 cpu_reg.spad_base = BNX2_CP_SCRATCH;
2518 cpu_reg.mips_view_base = 0x8000000;
2519
2520 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
2521 fw = &bnx2_cp_fw_09;
2522
2523 rc = load_cpu_fw(bp, &cpu_reg, fw);
2524 if (rc)
2525 goto init_cpu_err;
2526 }
2527 init_cpu_err:
2528 bnx2_gunzip_end(bp);
2529 return rc;
2530 }
2531
2532 static int
2533 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
2534 {
2535 u16 pmcsr;
2536
2537 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2538
2539 switch (state) {
2540 case PCI_D0: {
2541 u32 val;
2542
2543 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2544 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2545 PCI_PM_CTRL_PME_STATUS);
2546
2547 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2548 /* delay required during transition out of D3hot */
2549 msleep(20);
2550
2551 val = REG_RD(bp, BNX2_EMAC_MODE);
2552 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
2553 val &= ~BNX2_EMAC_MODE_MPKT;
2554 REG_WR(bp, BNX2_EMAC_MODE, val);
2555
2556 val = REG_RD(bp, BNX2_RPM_CONFIG);
2557 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2558 REG_WR(bp, BNX2_RPM_CONFIG, val);
2559 break;
2560 }
2561 case PCI_D3hot: {
2562 int i;
2563 u32 val, wol_msg;
2564
2565 if (bp->wol) {
2566 u32 advertising;
2567 u8 autoneg;
2568
2569 autoneg = bp->autoneg;
2570 advertising = bp->advertising;
2571
2572 bp->autoneg = AUTONEG_SPEED;
2573 bp->advertising = ADVERTISED_10baseT_Half |
2574 ADVERTISED_10baseT_Full |
2575 ADVERTISED_100baseT_Half |
2576 ADVERTISED_100baseT_Full |
2577 ADVERTISED_Autoneg;
2578
2579 bnx2_setup_copper_phy(bp);
2580
2581 bp->autoneg = autoneg;
2582 bp->advertising = advertising;
2583
2584 bnx2_set_mac_addr(bp);
2585
2586 val = REG_RD(bp, BNX2_EMAC_MODE);
2587
2588 /* Enable port mode. */
2589 val &= ~BNX2_EMAC_MODE_PORT;
2590 val |= BNX2_EMAC_MODE_PORT_MII |
2591 BNX2_EMAC_MODE_MPKT_RCVD |
2592 BNX2_EMAC_MODE_ACPI_RCVD |
2593 BNX2_EMAC_MODE_MPKT;
2594
2595 REG_WR(bp, BNX2_EMAC_MODE, val);
2596
2597 /* Receive all multicast frames. */
2598 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2599 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2600 0xffffffff);
2601 }
2602 REG_WR(bp, BNX2_EMAC_RX_MODE,
2603 BNX2_EMAC_RX_MODE_SORT_MODE);
2604
2605 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
2606 BNX2_RPM_SORT_USER0_MC_EN;
2607 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2608 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
2609 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
2610 BNX2_RPM_SORT_USER0_ENA);
2611
2612 /* Need to enable EMAC and RPM for WOL. */
2613 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2614 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
2615 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
2616 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
2617
2618 val = REG_RD(bp, BNX2_RPM_CONFIG);
2619 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2620 REG_WR(bp, BNX2_RPM_CONFIG, val);
2621
2622 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
2623 }
2624 else {
2625 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
2626 }
2627
2628 if (!(bp->flags & NO_WOL_FLAG))
2629 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
2630
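/* The low two bits of PCI_PM_CTRL encode the power state (0 = D0,
 * 3 = D3hot). 5706 A0/A1 parts are only moved to D3hot when WOL is
 * enabled; otherwise they are left in D0.
 */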
2631 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2632 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
2633 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
2634
2635 if (bp->wol)
2636 pmcsr |= 3;
2637 }
2638 else {
2639 pmcsr |= 3;
2640 }
2641 if (bp->wol) {
2642 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2643 }
2644 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2645 pmcsr);
2646
2647 /* No more memory access after this point until
2648 * the device is brought back to D0.
2649 */
2650 udelay(50);
2651 break;
2652 }
2653 default:
2654 return -EINVAL;
2655 }
2656 return 0;
2657 }
2658
2659 static int
2660 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2661 {
2662 u32 val;
2663 int j;
2664
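/* The NVM block has an access arbiter: assert request set 2 and
 * poll for the matching grant bit, 5 usec per try, for at most
 * NVRAM_TIMEOUT_COUNT iterations.
 */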
2665 /* Request access to the flash interface. */
2666 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2667 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2668 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2669 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2670 break;
2671
2672 udelay(5);
2673 }
2674
2675 if (j >= NVRAM_TIMEOUT_COUNT)
2676 return -EBUSY;
2677
2678 return 0;
2679 }
2680
2681 static int
2682 bnx2_release_nvram_lock(struct bnx2 *bp)
2683 {
2684 int j;
2685 u32 val;
2686
2687 /* Relinquish nvram interface. */
2688 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2689
2690 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2691 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2692 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2693 break;
2694
2695 udelay(5);
2696 }
2697
2698 if (j >= NVRAM_TIMEOUT_COUNT)
2699 return -EBUSY;
2700
2701 return 0;
2702 }
2703
2704
2705 static int
2706 bnx2_enable_nvram_write(struct bnx2 *bp)
2707 {
2708 u32 val;
2709
2710 val = REG_RD(bp, BNX2_MISC_CFG);
2711 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2712
2713 if (!bp->flash_info->buffered) {
2714 int j;
2715
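/* Non-buffered (SPI-style) flash parts additionally require a WREN
 * command before each program or erase; issue it and wait for the
 * controller to report completion.
 */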
2716 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2717 REG_WR(bp, BNX2_NVM_COMMAND,
2718 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2719
2720 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2721 udelay(5);
2722
2723 val = REG_RD(bp, BNX2_NVM_COMMAND);
2724 if (val & BNX2_NVM_COMMAND_DONE)
2725 break;
2726 }
2727
2728 if (j >= NVRAM_TIMEOUT_COUNT)
2729 return -EBUSY;
2730 }
2731 return 0;
2732 }
2733
2734 static void
2735 bnx2_disable_nvram_write(struct bnx2 *bp)
2736 {
2737 u32 val;
2738
2739 val = REG_RD(bp, BNX2_MISC_CFG);
2740 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2741 }
2742
2743
2744 static void
2745 bnx2_enable_nvram_access(struct bnx2 *bp)
2746 {
2747 u32 val;
2748
2749 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2750 /* Enable both bits, even on read. */
2751 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2752 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2753 }
2754
2755 static void
2756 bnx2_disable_nvram_access(struct bnx2 *bp)
2757 {
2758 u32 val;
2759
2760 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2761 /* Disable both bits, even after read. */
2762 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2763 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2764 BNX2_NVM_ACCESS_ENABLE_WR_EN));
2765 }
2766
2767 static int
2768 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2769 {
2770 u32 cmd;
2771 int j;
2772
2773 if (bp->flash_info->buffered)
2774 /* Buffered flash, no erase needed */
2775 return 0;
2776
2777 /* Build an erase command */
2778 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2779 BNX2_NVM_COMMAND_DOIT;
2780
2781 /* Need to clear DONE bit separately. */
2782 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2783
2784 /* Address of the NVRAM page to erase. */
2785 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2786
2787 /* Issue an erase command. */
2788 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2789
2790 /* Wait for completion. */
2791 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2792 u32 val;
2793
2794 udelay(5);
2795
2796 val = REG_RD(bp, BNX2_NVM_COMMAND);
2797 if (val & BNX2_NVM_COMMAND_DONE)
2798 break;
2799 }
2800
2801 if (j >= NVRAM_TIMEOUT_COUNT)
2802 return -EBUSY;
2803
2804 return 0;
2805 }
2806
2807 static int
2808 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
2809 {
2810 u32 cmd;
2811 int j;
2812
2813 /* Build the command word. */
2814 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
2815
2816 /* Translate the linear offset into a page-addressed offset for buffered flash. */
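/* Example (assuming a typical 264-byte page with page_bits = 9, as
 * used by AT45DB011B-style buffered parts): linear offset 600
 * becomes ((600 / 264) << 9) + (600 % 264) = 1024 + 72 = 1096.
 */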
2817 if (bp->flash_info->buffered) {
2818 offset = ((offset / bp->flash_info->page_size) <<
2819 bp->flash_info->page_bits) +
2820 (offset % bp->flash_info->page_size);
2821 }
2822
2823 /* Need to clear DONE bit separately. */
2824 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2825
2826 /* Address of the NVRAM to read from. */
2827 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2828
2829 /* Issue a read command. */
2830 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2831
2832 /* Wait for completion. */
2833 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2834 u32 val;
2835
2836 udelay(5);
2837
2838 val = REG_RD(bp, BNX2_NVM_COMMAND);
2839 if (val & BNX2_NVM_COMMAND_DONE) {
2840 val = REG_RD(bp, BNX2_NVM_READ);
2841
2842 val = be32_to_cpu(val);
2843 memcpy(ret_val, &val, 4);
2844 break;
2845 }
2846 }
2847 if (j >= NVRAM_TIMEOUT_COUNT)
2848 return -EBUSY;
2849
2850 return 0;
2851 }
2852
2853
2854 static int
2855 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2856 {
2857 u32 cmd, val32;
2858 int j;
2859
2860 /* Build the command word. */
2861 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2862
2863 /* Translate the linear offset into a page-addressed offset for buffered flash. */
2864 if (bp->flash_info->buffered) {
2865 offset = ((offset / bp->flash_info->page_size) <<
2866 bp->flash_info->page_bits) +
2867 (offset % bp->flash_info->page_size);
2868 }
2869
2870 /* Need to clear DONE bit separately. */
2871 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2872
2873 memcpy(&val32, val, 4);
2874 val32 = cpu_to_be32(val32);
2875
2876 /* Write the data. */
2877 REG_WR(bp, BNX2_NVM_WRITE, val32);
2878
2879 /* Address of the NVRAM to write to. */
2880 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2881
2882 /* Issue the write command. */
2883 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2884
2885 /* Wait for completion. */
2886 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2887 udelay(5);
2888
2889 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2890 break;
2891 }
2892 if (j >= NVRAM_TIMEOUT_COUNT)
2893 return -EBUSY;
2894
2895 return 0;
2896 }
2897
2898 static int
2899 bnx2_init_nvram(struct bnx2 *bp)
2900 {
2901 u32 val;
2902 int j, entry_count, rc;
2903 struct flash_spec *flash;
2904
2905 /* Determine the selected interface. */
2906 val = REG_RD(bp, BNX2_NVM_CFG1);
2907
2908 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
2909
2910 rc = 0;
2911 if (val & 0x40000000) {
2912
2913 /* Flash interface has been reconfigured */
2914 for (j = 0, flash = &flash_table[0]; j < entry_count;
2915 j++, flash++) {
2916 if ((val & FLASH_BACKUP_STRAP_MASK) ==
2917 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
2918 bp->flash_info = flash;
2919 break;
2920 }
2921 }
2922 }
2923 else {
2924 u32 mask;
2925 /* Not yet reconfigured */
2926
2927 if (val & (1 << 23))
2928 mask = FLASH_BACKUP_STRAP_MASK;
2929 else
2930 mask = FLASH_STRAP_MASK;
2931
2932 for (j = 0, flash = &flash_table[0]; j < entry_count;
2933 j++, flash++) {
2934
2935 if ((val & mask) == (flash->strapping & mask)) {
2936 bp->flash_info = flash;
2937
2938 /* Request access to the flash interface. */
2939 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2940 return rc;
2941
2942 /* Enable access to flash interface */
2943 bnx2_enable_nvram_access(bp);
2944
2945 /* Reconfigure the flash interface */
2946 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
2947 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
2948 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
2949 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
2950
2951 /* Disable access to flash interface */
2952 bnx2_disable_nvram_access(bp);
2953 bnx2_release_nvram_lock(bp);
2954
2955 break;
2956 }
2957 }
2958 } /* if (val & 0x40000000) */
2959
2960 if (j == entry_count) {
2961 bp->flash_info = NULL;
2962 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
2963 return -ENODEV;
2964 }
2965
2966 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
2967 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
2968 if (val)
2969 bp->flash_size = val;
2970 else
2971 bp->flash_size = bp->flash_info->total_size;
2972
2973 return rc;
2974 }
2975
2976 static int
2977 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2978 int buf_size)
2979 {
2980 int rc = 0;
2981 u32 cmd_flags, offset32, len32, extra;
2982
2983 if (buf_size == 0)
2984 return 0;
2985
2986 /* Request access to the flash interface. */
2987 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2988 return rc;
2989
2990 /* Enable access to flash interface */
2991 bnx2_enable_nvram_access(bp);
2992
2993 len32 = buf_size;
2994 offset32 = offset;
2995 extra = 0;
2996
2997 cmd_flags = 0;
2998
2999 if (offset32 & 3) {
3000 u8 buf[4];
3001 u32 pre_len;
3002
3003 offset32 &= ~3;
3004 pre_len = 4 - (offset & 3);
3005
3006 if (pre_len >= len32) {
3007 pre_len = len32;
3008 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3009 BNX2_NVM_COMMAND_LAST;
3010 }
3011 else {
3012 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3013 }
3014
3015 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3016
3017 if (rc)
3018 return rc;
3019
3020 memcpy(ret_buf, buf + (offset & 3), pre_len);
3021
3022 offset32 += 4;
3023 ret_buf += pre_len;
3024 len32 -= pre_len;
3025 }
3026 if (len32 & 3) {
3027 extra = 4 - (len32 & 3);
3028 len32 = (len32 + 4) & ~3;
3029 }
3030
3031 if (len32 == 4) {
3032 u8 buf[4];
3033
3034 if (cmd_flags)
3035 cmd_flags = BNX2_NVM_COMMAND_LAST;
3036 else
3037 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3038 BNX2_NVM_COMMAND_LAST;
3039
3040 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3041
3042 memcpy(ret_buf, buf, 4 - extra);
3043 }
3044 else if (len32 > 0) {
3045 u8 buf[4];
3046
3047 /* Read the first dword. */
3048 if (cmd_flags)
3049 cmd_flags = 0;
3050 else
3051 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3052
3053 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3054
3055 /* Advance to the next dword. */
3056 offset32 += 4;
3057 ret_buf += 4;
3058 len32 -= 4;
3059
3060 while (len32 > 4 && rc == 0) {
3061 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3062
3063 /* Advance to the next dword. */
3064 offset32 += 4;
3065 ret_buf += 4;
3066 len32 -= 4;
3067 }
3068
3069 if (rc)
3070 return rc;
3071
3072 cmd_flags = BNX2_NVM_COMMAND_LAST;
3073 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3074
3075 memcpy(ret_buf, buf, 4 - extra);
3076 }
3077
3078 /* Disable access to flash interface */
3079 bnx2_disable_nvram_access(bp);
3080
3081 bnx2_release_nvram_lock(bp);
3082
3083 return rc;
3084 }
3085
3086 static int
3087 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3088 int buf_size)
3089 {
3090 u32 written, offset32, len32;
3091 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3092 int rc = 0;
3093 int align_start, align_end;
3094
3095 buf = data_buf;
3096 offset32 = offset;
3097 len32 = buf_size;
3098 align_start = align_end = 0;
3099
3100 if ((align_start = (offset32 & 3))) {
3101 offset32 &= ~3;
3102 len32 += align_start;
3103 if (len32 < 4)
3104 len32 = 4;
3105 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3106 return rc;
3107 }
3108
3109 if (len32 & 3) {
3110 align_end = 4 - (len32 & 3);
3111 len32 += align_end;
3112 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3113 return rc;
3114 }
3115
3116 if (align_start || align_end) {
3117 align_buf = kmalloc(len32, GFP_KERNEL);
3118 if (align_buf == NULL)
3119 return -ENOMEM;
3120 if (align_start) {
3121 memcpy(align_buf, start, 4);
3122 }
3123 if (align_end) {
3124 memcpy(align_buf + len32 - 4, end, 4);
3125 }
3126 memcpy(align_buf + align_start, data_buf, buf_size);
3127 buf = align_buf;
3128 }
3129
3130 if (bp->flash_info->buffered == 0) {
3131 flash_buffer = kmalloc(264, GFP_KERNEL);
3132 if (flash_buffer == NULL) {
3133 rc = -ENOMEM;
3134 goto nvram_write_end;
3135 }
3136 }
3137
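/* Write one flash page per iteration. For non-buffered flash this
 * is a read-modify-write: read the whole page, erase it, restore the
 * bytes before data_start, write the new data, then restore the
 * bytes from data_end to page_end. Buffered flash is written
 * directly, with no erase or restore steps.
 */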
3138 written = 0;
3139 while ((written < len32) && (rc == 0)) {
3140 u32 page_start, page_end, data_start, data_end;
3141 u32 addr, cmd_flags;
3142 int i;
3143
3144 /* Find the page_start addr */
3145 page_start = offset32 + written;
3146 page_start -= (page_start % bp->flash_info->page_size);
3147 /* Find the page_end addr */
3148 page_end = page_start + bp->flash_info->page_size;
3149 /* Find the data_start addr */
3150 data_start = (written == 0) ? offset32 : page_start;
3151 /* Find the data_end addr */
3152 data_end = (page_end > offset32 + len32) ?
3153 (offset32 + len32) : page_end;
3154
3155 /* Request access to the flash interface. */
3156 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3157 goto nvram_write_end;
3158
3159 /* Enable access to flash interface */
3160 bnx2_enable_nvram_access(bp);
3161
3162 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3163 if (bp->flash_info->buffered == 0) {
3164 int j;
3165
3166 /* Read the whole page into the buffer
3167 * (non-buffered flash only) */
3168 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3169 if (j == (bp->flash_info->page_size - 4)) {
3170 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3171 }
3172 rc = bnx2_nvram_read_dword(bp,
3173 page_start + j,
3174 &flash_buffer[j],
3175 cmd_flags);
3176
3177 if (rc)
3178 goto nvram_write_end;
3179
3180 cmd_flags = 0;
3181 }
3182 }
3183
3184 /* Enable writes to flash interface (unlock write-protect) */
3185 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3186 goto nvram_write_end;
3187
3188 /* Loop to write back the buffer data from page_start to
3189 * data_start */
3190 i = 0;
3191 if (bp->flash_info->buffered == 0) {
3192 /* Erase the page */
3193 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3194 goto nvram_write_end;
3195
3196 /* Re-enable writes for the actual data write */
3197 bnx2_enable_nvram_write(bp);
3198
3199 for (addr = page_start; addr < data_start;
3200 addr += 4, i += 4) {
3201
3202 rc = bnx2_nvram_write_dword(bp, addr,
3203 &flash_buffer[i], cmd_flags);
3204
3205 if (rc != 0)
3206 goto nvram_write_end;
3207
3208 cmd_flags = 0;
3209 }
3210 }
3211
3212 /* Loop to write the new data from data_start to data_end */
3213 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3214 if ((addr == page_end - 4) ||
3215 ((bp->flash_info->buffered) &&
3216 (addr == data_end - 4))) {
3217
3218 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3219 }
3220 rc = bnx2_nvram_write_dword(bp, addr, buf,
3221 cmd_flags);
3222
3223 if (rc != 0)
3224 goto nvram_write_end;
3225
3226 cmd_flags = 0;
3227 buf += 4;
3228 }
3229
3230 /* Loop to write back the buffer data from data_end
3231 * to page_end */
3232 if (bp->flash_info->buffered == 0) {
3233 for (addr = data_end; addr < page_end;
3234 addr += 4, i += 4) {
3235
3236 if (addr == page_end-4) {
3237 cmd_flags = BNX2_NVM_COMMAND_LAST;
3238 }
3239 rc = bnx2_nvram_write_dword(bp, addr,
3240 &flash_buffer[i], cmd_flags);
3241
3242 if (rc != 0)
3243 goto nvram_write_end;
3244
3245 cmd_flags = 0;
3246 }
3247 }
3248
3249 /* Disable writes to flash interface (lock write-protect) */
3250 bnx2_disable_nvram_write(bp);
3251
3252 /* Disable access to flash interface */
3253 bnx2_disable_nvram_access(bp);
3254 bnx2_release_nvram_lock(bp);
3255
3256 /* Account for the bytes written in this page */
3257 written += data_end - data_start;
3258 }
3259
3260 nvram_write_end:
3261 kfree(flash_buffer);
3262 kfree(align_buf);
3263 return rc;
3264 }
3265
3266 static int
3267 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3268 {
3269 u32 val;
3270 int i, rc = 0;
3271
3272 /* Wait for the current PCI transaction to complete before
3273 * issuing a reset. */
3274 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3275 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3276 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3277 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3278 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3279 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3280 udelay(5);
3281
3282 /* Wait for the firmware to tell us it is ok to issue a reset. */
3283 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3284
3285 /* Deposit a driver reset signature so the firmware knows that
3286 * this is a soft reset. */
3287 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3288 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3289
3290 /* Do a dummy read to force the chip to complete all current transactions
3291 * before we issue a reset. */
3292 val = REG_RD(bp, BNX2_MISC_ID);
3293
3294 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3295 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3296 REG_RD(bp, BNX2_MISC_COMMAND);
3297 udelay(5);
3298
3299 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3300 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3301
3302 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
3303
3304 } else {
3305 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3306 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3307 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3308
3309 /* Chip reset. */
3310 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3311
3312 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3313 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3314 current->state = TASK_UNINTERRUPTIBLE;
3315 schedule_timeout(HZ / 50);
3316 }
3317
3318 /* Reset takes approximately 30 usec */
3319 for (i = 0; i < 10; i++) {
3320 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3321 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3322 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3323 break;
3324 udelay(10);
3325 }
3326
3327 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3328 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3329 printk(KERN_ERR PFX "Chip reset did not complete\n");
3330 return -EBUSY;
3331 }
3332 }
3333
3334 /* Make sure byte swapping is properly configured. */
3335 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3336 if (val != 0x01020304) {
3337 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3338 return -ENODEV;
3339 }
3340
3341 /* Wait for the firmware to finish its initialization. */
3342 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3343 if (rc)
3344 return rc;
3345
3346 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3347 /* Adjust the voltage regulator two steps lower. The default
3348 * of this register is 0x0000000e. */
3349 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3350
3351 /* Remove bad rbuf memory from the free pool. */
3352 rc = bnx2_alloc_bad_rbuf(bp);
3353 }
3354
3355 return rc;
3356 }
3357
3358 static int
3359 bnx2_init_chip(struct bnx2 *bp)
3360 {
3361 u32 val;
3362 int rc;
3363
3364 /* Make sure the interrupt is not active. */
3365 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3366
3367 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3368 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3369 #ifdef __BIG_ENDIAN
3370 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
3371 #endif
3372 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
3373 DMA_READ_CHANS << 12 |
3374 DMA_WRITE_CHANS << 16;
3375
3376 val |= (0x2 << 20) | (1 << 11);
3377
3378 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
3379 val |= (1 << 23);
3380
3381 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3382 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3383 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3384
3385 REG_WR(bp, BNX2_DMA_CONFIG, val);
3386
3387 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3388 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3389 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3390 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3391 }
3392
3393 if (bp->flags & PCIX_FLAG) {
3394 u16 val16;
3395
3396 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3397 &val16);
3398 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3399 val16 & ~PCI_X_CMD_ERO);
3400 }
3401
3402 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3403 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3404 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3405 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3406
3407 /* Initialize context mapping and zero out the quick contexts. The
3408 * context block must have already been enabled. */
3409 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3410 bnx2_init_5709_context(bp);
3411 else
3412 bnx2_init_context(bp);
3413
3414 if ((rc = bnx2_init_cpus(bp)) != 0)
3415 return rc;
3416
3417 bnx2_init_nvram(bp);
3418
3419 bnx2_set_mac_addr(bp);
3420
3421 val = REG_RD(bp, BNX2_MQ_CONFIG);
3422 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3423 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3424 REG_WR(bp, BNX2_MQ_CONFIG, val);
3425
3426 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3427 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3428 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3429
3430 val = (BCM_PAGE_BITS - 8) << 24;
3431 REG_WR(bp, BNX2_RV2P_CONFIG, val);
3432
3433 /* Configure page size. */
3434 val = REG_RD(bp, BNX2_TBDR_CONFIG);
3435 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3436 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3437 REG_WR(bp, BNX2_TBDR_CONFIG, val);
3438
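/* Seed the EMAC backoff random number generator from the MAC
 * address, so that different stations are unlikely to pick the same
 * half-duplex backoff slots.
 */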
3439 val = bp->mac_addr[0] +
3440 (bp->mac_addr[1] << 8) +
3441 (bp->mac_addr[2] << 16) +
3442 bp->mac_addr[3] +
3443 (bp->mac_addr[4] << 8) +
3444 (bp->mac_addr[5] << 16);
3445 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
3446
3447 /* Program the MTU. Also include 4 bytes for CRC32. */
3448 val = bp->dev->mtu + ETH_HLEN + 4;
3449 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
3450 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
3451 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
3452
3453 bp->last_status_idx = 0;
3454 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
3455
3456 /* Set up how to generate a link change interrupt. */
3457 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
3458
3459 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
3460 (u64) bp->status_blk_mapping & 0xffffffff);
3461 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
3462
3463 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
3464 (u64) bp->stats_blk_mapping & 0xffffffff);
3465 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
3466 (u64) bp->stats_blk_mapping >> 32);
3467
3468 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
3469 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
3470
3471 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
3472 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
3473
3474 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
3475 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
3476
3477 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
3478
3479 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
3480
3481 REG_WR(bp, BNX2_HC_COM_TICKS,
3482 (bp->com_ticks_int << 16) | bp->com_ticks);
3483
3484 REG_WR(bp, BNX2_HC_CMD_TICKS,
3485 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
3486
3487 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
3488 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3489
3490 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
3491 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
3492 else {
3493 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
3494 BNX2_HC_CONFIG_TX_TMR_MODE |
3495 BNX2_HC_CONFIG_COLLECT_STATS);
3496 }
3497
3498 /* Clear internal stats counters. */
3499 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
3500
3501 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3502
3503 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
3504 BNX2_PORT_FEATURE_ASF_ENABLED)
3505 bp->flags |= ASF_ENABLE_FLAG;
3506
3507 /* Initialize the receive filter. */
3508 bnx2_set_rx_mode(bp->dev);
3509
3510 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
3511 0);
3512
3513 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
3514 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
3515
3516 udelay(20);
3517
3518 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
3519
3520 return rc;
3521 }
3522
3523 static void
3524 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
3525 {
3526 u32 val, offset0, offset1, offset2, offset3;
3527
3528 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3529 offset0 = BNX2_L2CTX_TYPE_XI;
3530 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
3531 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
3532 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
3533 } else {
3534 offset0 = BNX2_L2CTX_TYPE;
3535 offset1 = BNX2_L2CTX_CMD_TYPE;
3536 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
3537 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
3538 }
3539 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
3540 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
3541
3542 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3543 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
3544
3545 val = (u64) bp->tx_desc_mapping >> 32;
3546 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
3547
3548 val = (u64) bp->tx_desc_mapping & 0xffffffff;
3549 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
3550 }
3551
3552 static void
3553 bnx2_init_tx_ring(struct bnx2 *bp)
3554 {
3555 struct tx_bd *txbd;
3556 u32 cid;
3557
3558 bp->tx_wake_thresh = bp->tx_ring_size / 2;
3559
3560 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
3561
3562 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3563 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3564
3565 bp->tx_prod = 0;
3566 bp->tx_cons = 0;
3567 bp->hw_tx_cons = 0;
3568 bp->tx_prod_bseq = 0;
3569
3570 cid = TX_CID;
3571 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
3572 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
3573
3574 bnx2_init_tx_context(bp, cid);
3575 }
3576
3577 static void
3578 bnx2_init_rx_ring(struct bnx2 *bp)
3579 {
3580 struct rx_bd *rxbd;
3581 int i;
3582 u16 prod, ring_prod;
3583 u32 val;
3584
3585 /* 8 for CRC and VLAN */
3586 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
3587 /* hw alignment */
3588 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
3589
3590 ring_prod = prod = bp->rx_prod = 0;
3591 bp->rx_cons = 0;
3592 bp->hw_rx_cons = 0;
3593 bp->rx_prod_bseq = 0;
3594
3595 for (i = 0; i < bp->rx_max_ring; i++) {
3596 int j;
3597
3598 rxbd = &bp->rx_desc_ring[i][0];
3599 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
3600 rxbd->rx_bd_len = bp->rx_buf_use_size;
3601 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
3602 }
3603 if (i == (bp->rx_max_ring - 1))
3604 j = 0;
3605 else
3606 j = i + 1;
3607 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
3608 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
3609 0xffffffff;
3610 }
3611
3612 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3613 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
3614 val |= 0x02 << 8;
3615 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
3616
3617 val = (u64) bp->rx_desc_mapping[0] >> 32;
3618 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
3619
3620 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
3621 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
3622
3623 for (i = 0; i < bp->rx_ring_size; i++) {
3624 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
3625 break;
3626 }
3627 prod = NEXT_RX_BD(prod);
3628 ring_prod = RX_RING_IDX(prod);
3629 }
3630 bp->rx_prod = prod;
3631
3632 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
3633
3634 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
3635 }
3636
3637 static void
3638 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3639 {
3640 u32 num_rings, max;
3641
3642 bp->rx_ring_size = size;
3643 num_rings = 1;
3644 while (size > MAX_RX_DESC_CNT) {
3645 size -= MAX_RX_DESC_CNT;
3646 num_rings++;
3647 }
3648 /* round to next power of 2 */
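/* The ring count must be a power of two because the ring index is
 * computed with a mask. Example: num_rings = 3 -> max narrows to 2,
 * then is doubled to 4.
 */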
3649 max = MAX_RX_RINGS;
3650 while ((max & num_rings) == 0)
3651 max >>= 1;
3652
3653 if (num_rings != max)
3654 max <<= 1;
3655
3656 bp->rx_max_ring = max;
3657 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3658 }
3659
3660 static void
3661 bnx2_free_tx_skbs(struct bnx2 *bp)
3662 {
3663 int i;
3664
3665 if (bp->tx_buf_ring == NULL)
3666 return;
3667
3668 for (i = 0; i < TX_DESC_CNT; ) {
3669 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3670 struct sk_buff *skb = tx_buf->skb;
3671 int j, last;
3672
3673 if (skb == NULL) {
3674 i++;
3675 continue;
3676 }
3677
3678 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3679 skb_headlen(skb), PCI_DMA_TODEVICE);
3680
3681 tx_buf->skb = NULL;
3682
3683 last = skb_shinfo(skb)->nr_frags;
3684 for (j = 0; j < last; j++) {
3685 tx_buf = &bp->tx_buf_ring[i + j + 1];
3686 pci_unmap_page(bp->pdev,
3687 pci_unmap_addr(tx_buf, mapping),
3688 skb_shinfo(skb)->frags[j].size,
3689 PCI_DMA_TODEVICE);
3690 }
3691 dev_kfree_skb(skb);
3692 i += j + 1;
3693 }
3694
3695 }
3696
3697 static void
3698 bnx2_free_rx_skbs(struct bnx2 *bp)
3699 {
3700 int i;
3701
3702 if (bp->rx_buf_ring == NULL)
3703 return;
3704
3705 for (i = 0; i < bp->rx_max_ring_idx; i++) {
3706 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3707 struct sk_buff *skb = rx_buf->skb;
3708
3709 if (skb == NULL)
3710 continue;
3711
3712 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3713 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3714
3715 rx_buf->skb = NULL;
3716
3717 dev_kfree_skb(skb);
3718 }
3719 }
3720
3721 static void
3722 bnx2_free_skbs(struct bnx2 *bp)
3723 {
3724 bnx2_free_tx_skbs(bp);
3725 bnx2_free_rx_skbs(bp);
3726 }
3727
3728 static int
3729 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3730 {
3731 int rc;
3732
3733 rc = bnx2_reset_chip(bp, reset_code);
3734 bnx2_free_skbs(bp);
3735 if (rc)
3736 return rc;
3737
3738 if ((rc = bnx2_init_chip(bp)) != 0)
3739 return rc;
3740
3741 bnx2_init_tx_ring(bp);
3742 bnx2_init_rx_ring(bp);
3743 return 0;
3744 }
3745
3746 static int
3747 bnx2_init_nic(struct bnx2 *bp)
3748 {
3749 int rc;
3750
3751 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3752 return rc;
3753
3754 spin_lock_bh(&bp->phy_lock);
3755 bnx2_init_phy(bp);
3756 spin_unlock_bh(&bp->phy_lock);
3757 bnx2_set_link(bp);
3758 return 0;
3759 }
3760
3761 static int
3762 bnx2_test_registers(struct bnx2 *bp)
3763 {
3764 int ret;
3765 int i;
3766 static const struct {
3767 u16 offset;
3768 u16 flags;
3769 u32 rw_mask;
3770 u32 ro_mask;
3771 } reg_tbl[] = {
3772 { 0x006c, 0, 0x00000000, 0x0000003f },
3773 { 0x0090, 0, 0xffffffff, 0x00000000 },
3774 { 0x0094, 0, 0x00000000, 0x00000000 },
3775
3776 { 0x0404, 0, 0x00003f00, 0x00000000 },
3777 { 0x0418, 0, 0x00000000, 0xffffffff },
3778 { 0x041c, 0, 0x00000000, 0xffffffff },
3779 { 0x0420, 0, 0x00000000, 0x80ffffff },
3780 { 0x0424, 0, 0x00000000, 0x00000000 },
3781 { 0x0428, 0, 0x00000000, 0x00000001 },
3782 { 0x0450, 0, 0x00000000, 0x0000ffff },
3783 { 0x0454, 0, 0x00000000, 0xffffffff },
3784 { 0x0458, 0, 0x00000000, 0xffffffff },
3785
3786 { 0x0808, 0, 0x00000000, 0xffffffff },
3787 { 0x0854, 0, 0x00000000, 0xffffffff },
3788 { 0x0868, 0, 0x00000000, 0x77777777 },
3789 { 0x086c, 0, 0x00000000, 0x77777777 },
3790 { 0x0870, 0, 0x00000000, 0x77777777 },
3791 { 0x0874, 0, 0x00000000, 0x77777777 },
3792
3793 { 0x0c00, 0, 0x00000000, 0x00000001 },
3794 { 0x0c04, 0, 0x00000000, 0x03ff0001 },
3795 { 0x0c08, 0, 0x0f0ff073, 0x00000000 },
3796
3797 { 0x1000, 0, 0x00000000, 0x00000001 },
3798 { 0x1004, 0, 0x00000000, 0x000f0001 },
3799
3800 { 0x1408, 0, 0x01c00800, 0x00000000 },
3801 { 0x149c, 0, 0x8000ffff, 0x00000000 },
3802 { 0x14a8, 0, 0x00000000, 0x000001ff },
3803 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
3804 { 0x14b0, 0, 0x00000002, 0x00000001 },
3805 { 0x14b8, 0, 0x00000000, 0x00000000 },
3806 { 0x14c0, 0, 0x00000000, 0x00000009 },
3807 { 0x14c4, 0, 0x00003fff, 0x00000000 },
3808 { 0x14cc, 0, 0x00000000, 0x00000001 },
3809 { 0x14d0, 0, 0xffffffff, 0x00000000 },
3810
3811 { 0x1800, 0, 0x00000000, 0x00000001 },
3812 { 0x1804, 0, 0x00000000, 0x00000003 },
3813
3814 { 0x2800, 0, 0x00000000, 0x00000001 },
3815 { 0x2804, 0, 0x00000000, 0x00003f01 },
3816 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
3817 { 0x2810, 0, 0xffff0000, 0x00000000 },
3818 { 0x2814, 0, 0xffff0000, 0x00000000 },
3819 { 0x2818, 0, 0xffff0000, 0x00000000 },
3820 { 0x281c, 0, 0xffff0000, 0x00000000 },
3821 { 0x2834, 0, 0xffffffff, 0x00000000 },
3822 { 0x2840, 0, 0x00000000, 0xffffffff },
3823 { 0x2844, 0, 0x00000000, 0xffffffff },
3824 { 0x2848, 0, 0xffffffff, 0x00000000 },
3825 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
3826
3827 { 0x2c00, 0, 0x00000000, 0x00000011 },
3828 { 0x2c04, 0, 0x00000000, 0x00030007 },
3829
3830 { 0x3c00, 0, 0x00000000, 0x00000001 },
3831 { 0x3c04, 0, 0x00000000, 0x00070000 },
3832 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
3833 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
3834 { 0x3c10, 0, 0xffffffff, 0x00000000 },
3835 { 0x3c14, 0, 0x00000000, 0xffffffff },
3836 { 0x3c18, 0, 0x00000000, 0xffffffff },
3837 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
3838 { 0x3c20, 0, 0xffffff00, 0x00000000 },
3839
3840 { 0x5004, 0, 0x00000000, 0x0000007f },
3841 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
3842 { 0x500c, 0, 0xf800f800, 0x07ff07ff },
3843
3844 { 0x5c00, 0, 0x00000000, 0x00000001 },
3845 { 0x5c04, 0, 0x00000000, 0x0003000f },
3846 { 0x5c08, 0, 0x00000003, 0x00000000 },
3847 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
3848 { 0x5c10, 0, 0x00000000, 0xffffffff },
3849 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
3850 { 0x5c84, 0, 0x00000000, 0x0000f333 },
3851 { 0x5c88, 0, 0x00000000, 0x00077373 },
3852 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
3853
3854 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
3855 { 0x680c, 0, 0xffffffff, 0x00000000 },
3856 { 0x6810, 0, 0xffffffff, 0x00000000 },
3857 { 0x6814, 0, 0xffffffff, 0x00000000 },
3858 { 0x6818, 0, 0xffffffff, 0x00000000 },
3859 { 0x681c, 0, 0xffffffff, 0x00000000 },
3860 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
3861 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
3862 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
3863 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
3864 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
3865 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
3866 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
3867 { 0x683c, 0, 0x0000ffff, 0x00000000 },
3868 { 0x6840, 0, 0x00000ff0, 0x00000000 },
3869 { 0x6844, 0, 0x00ffff00, 0x00000000 },
3870 { 0x684c, 0, 0xffffffff, 0x00000000 },
3871 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
3872 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
3873 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
3874 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
3875 { 0x6908, 0, 0x00000000, 0x0001ff0f },
3876 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
3877
3878 { 0xffff, 0, 0x00000000, 0x00000000 },
3879 };
3880
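/* For each register: write 0 and check that the read/write bits read
 * back 0 while the read-only bits keep their saved value; then write
 * all ones and check that the read/write bits read back 1, again
 * with the read-only bits unchanged. The saved value is restored
 * either way.
 */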
3881 ret = 0;
3882 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
3883 u32 offset, rw_mask, ro_mask, save_val, val;
3884
3885 offset = (u32) reg_tbl[i].offset;
3886 rw_mask = reg_tbl[i].rw_mask;
3887 ro_mask = reg_tbl[i].ro_mask;
3888
3889 save_val = readl(bp->regview + offset);
3890
3891 writel(0, bp->regview + offset);
3892
3893 val = readl(bp->regview + offset);
3894 if ((val & rw_mask) != 0) {
3895 goto reg_test_err;
3896 }
3897
3898 if ((val & ro_mask) != (save_val & ro_mask)) {
3899 goto reg_test_err;
3900 }
3901
3902 writel(0xffffffff, bp->regview + offset);
3903
3904 val = readl(bp->regview + offset);
3905 if ((val & rw_mask) != rw_mask) {
3906 goto reg_test_err;
3907 }
3908
3909 if ((val & ro_mask) != (save_val & ro_mask)) {
3910 goto reg_test_err;
3911 }
3912
3913 writel(save_val, bp->regview + offset);
3914 continue;
3915
3916 reg_test_err:
3917 writel(save_val, bp->regview + offset);
3918 ret = -ENODEV;
3919 break;
3920 }
3921 return ret;
3922 }
3923
3924 static int
3925 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3926 {
3927 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
3928 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3929 int i;
3930
3931 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3932 u32 offset;
3933
3934 for (offset = 0; offset < size; offset += 4) {
3935
3936 REG_WR_IND(bp, start + offset, test_pattern[i]);
3937
3938 if (REG_RD_IND(bp, start + offset) !=
3939 test_pattern[i]) {
3940 return -ENODEV;
3941 }
3942 }
3943 }
3944 return 0;
3945 }
3946
3947 static int
3948 bnx2_test_memory(struct bnx2 *bp)
3949 {
3950 int ret = 0;
3951 int i;
3952 static const struct {
3953 u32 offset;
3954 u32 len;
3955 } mem_tbl[] = {
3956 { 0x60000, 0x4000 },
3957 { 0xa0000, 0x3000 },
3958 { 0xe0000, 0x4000 },
3959 { 0x120000, 0x4000 },
3960 { 0x1a0000, 0x4000 },
3961 { 0x160000, 0x4000 },
3962 { 0xffffffff, 0 },
3963 };
3964
3965 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3966 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3967 mem_tbl[i].len)) != 0) {
3968 return ret;
3969 }
3970 }
3971
3972 return ret;
3973 }
3974
3975 #define BNX2_MAC_LOOPBACK 0
3976 #define BNX2_PHY_LOOPBACK 1
3977
3978 static int
3979 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
3980 {
3981 unsigned int pkt_size, num_pkts, i;
3982 struct sk_buff *skb, *rx_skb;
3983 unsigned char *packet;
3984 u16 rx_start_idx, rx_idx;
3985 dma_addr_t map;
3986 struct tx_bd *txbd;
3987 struct sw_bd *rx_buf;
3988 struct l2_fhdr *rx_hdr;
3989 int ret = -ENODEV;
3990
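/* The test builds one maximum-size frame addressed to our own MAC,
 * posts it on the TX ring, forces the host coalescing block to
 * update the status block, and then verifies that exactly one RX
 * completion arrived with an error-free header and matching payload.
 */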
3991 if (loopback_mode == BNX2_MAC_LOOPBACK) {
3992 bp->loopback = MAC_LOOPBACK;
3993 bnx2_set_mac_loopback(bp);
3994 }
3995 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
3996 bp->loopback = PHY_LOOPBACK;
3997 bnx2_set_phy_loopback(bp);
3998 }
3999 else
4000 return -EINVAL;
4001
4002 pkt_size = 1514;
4003 skb = netdev_alloc_skb(bp->dev, pkt_size);
4004 if (!skb)
4005 return -ENOMEM;
4006 packet = skb_put(skb, pkt_size);
4007 memcpy(packet, bp->dev->dev_addr, 6);
4008 memset(packet + 6, 0x0, 8);
4009 for (i = 14; i < pkt_size; i++)
4010 packet[i] = (unsigned char) (i & 0xff);
4011
4012 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4013 PCI_DMA_TODEVICE);
4014
4015 REG_WR(bp, BNX2_HC_COMMAND,
4016 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4017
4018 REG_RD(bp, BNX2_HC_COMMAND);
4019
4020 udelay(5);
4021 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4022
4023 num_pkts = 0;
4024
4025 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
4026
4027 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4028 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4029 txbd->tx_bd_mss_nbytes = pkt_size;
4030 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4031
4032 num_pkts++;
4033 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4034 bp->tx_prod_bseq += pkt_size;
4035
4036 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4037 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4038
4039 udelay(100);
4040
4041 REG_WR(bp, BNX2_HC_COMMAND,
4042 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4043
4044 REG_RD(bp, BNX2_HC_COMMAND);
4045
4046 udelay(5);
4047
4048 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
4049 dev_kfree_skb(skb);
4050
4051 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
4052 goto loopback_test_done;
4053 }
4054
4055 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4056 if (rx_idx != rx_start_idx + num_pkts) {
4057 goto loopback_test_done;
4058 }
4059
4060 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4061 rx_skb = rx_buf->skb;
4062
4063 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4064 skb_reserve(rx_skb, bp->rx_offset);
4065
4066 pci_dma_sync_single_for_cpu(bp->pdev,
4067 pci_unmap_addr(rx_buf, mapping),
4068 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4069
4070 if (rx_hdr->l2_fhdr_status &
4071 (L2_FHDR_ERRORS_BAD_CRC |
4072 L2_FHDR_ERRORS_PHY_DECODE |
4073 L2_FHDR_ERRORS_ALIGNMENT |
4074 L2_FHDR_ERRORS_TOO_SHORT |
4075 L2_FHDR_ERRORS_GIANT_FRAME)) {
4076
4077 goto loopback_test_done;
4078 }
4079
4080 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4081 goto loopback_test_done;
4082 }
4083
4084 for (i = 14; i < pkt_size; i++) {
4085 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4086 goto loopback_test_done;
4087 }
4088 }
4089
4090 ret = 0;
4091
4092 loopback_test_done:
4093 bp->loopback = 0;
4094 return ret;
4095 }
4096
4097 #define BNX2_MAC_LOOPBACK_FAILED 1
4098 #define BNX2_PHY_LOOPBACK_FAILED 2
4099 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4100 BNX2_PHY_LOOPBACK_FAILED)
4101
4102 static int
4103 bnx2_test_loopback(struct bnx2 *bp)
4104 {
4105 int rc = 0;
4106
4107 if (!netif_running(bp->dev))
4108 return BNX2_LOOPBACK_FAILED;
4109
4110 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4111 spin_lock_bh(&bp->phy_lock);
4112 bnx2_init_phy(bp);
4113 spin_unlock_bh(&bp->phy_lock);
4114 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4115 rc |= BNX2_MAC_LOOPBACK_FAILED;
4116 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4117 rc |= BNX2_PHY_LOOPBACK_FAILED;
4118 return rc;
4119 }
4120
4121 #define NVRAM_SIZE 0x200
4122 #define CRC32_RESIDUAL 0xdebb20e3
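/* 0xdebb20e3 is the well-known CRC32 residue: running the CRC over a
 * block that includes its own stored checksum yields this fixed
 * value, so each 0x100-byte half of the test region is self-checking.
 */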
4123
4124 static int
4125 bnx2_test_nvram(struct bnx2 *bp)
4126 {
4127 u32 buf[NVRAM_SIZE / 4];
4128 u8 *data = (u8 *) buf;
4129 int rc = 0;
4130 u32 magic, csum;
4131
4132 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4133 goto test_nvram_done;
4134
4135 magic = be32_to_cpu(buf[0]);
4136 if (magic != 0x669955aa) {
4137 rc = -ENODEV;
4138 goto test_nvram_done;
4139 }
4140
4141 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4142 goto test_nvram_done;
4143
4144 csum = ether_crc_le(0x100, data);
4145 if (csum != CRC32_RESIDUAL) {
4146 rc = -ENODEV;
4147 goto test_nvram_done;
4148 }
4149
4150 csum = ether_crc_le(0x100, data + 0x100);
4151 if (csum != CRC32_RESIDUAL) {
4152 rc = -ENODEV;
4153 }
4154
4155 test_nvram_done:
4156 return rc;
4157 }
4158
4159 static int
4160 bnx2_test_link(struct bnx2 *bp)
4161 {
4162 u32 bmsr;
4163
4164 spin_lock_bh(&bp->phy_lock);
4165 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4166 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4167 spin_unlock_bh(&bp->phy_lock);
4168
4169 if (bmsr & BMSR_LSTATUS) {
4170 return 0;
4171 }
4172 return -ENODEV;
4173 }
4174
4175 static int
4176 bnx2_test_intr(struct bnx2 *bp)
4177 {
4178 int i;
4179 u16 status_idx;
4180
4181 if (!netif_running(bp->dev))
4182 return -ENODEV;
4183
4184 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4185
4186 /* This register is not touched during run-time. */
4187 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4188 REG_RD(bp, BNX2_HC_COMMAND);
4189
4190 for (i = 0; i < 10; i++) {
4191 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4192 status_idx) {
4193
4194 break;
4195 }
4196
4197 msleep_interruptible(10);
4198 }
4199 if (i < 10)
4200 return 0;
4201
4202 return -ENODEV;
4203 }
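/* bnx2_test_intr() above follows a bounded-poll pattern: trigger the
 * hardware once (COAL_NOW), then poll for the side effect a fixed
 * number of times with a short sleep between attempts.  Illustrative
 * standalone sketch of that pattern, not driver code; poll_fn and the
 * fake callback are hypothetical stand-ins for the status-index read.
 */
#if 0
#include <stdio.h>
#include <unistd.h>

static int poll_until(int (*poll_fn)(void *), void *arg,
		      int tries, unsigned int interval_us)
{
	int i;

	for (i = 0; i < tries; i++) {
		if (poll_fn(arg))
			return 0;		/* condition met */
		usleep(interval_us);
	}
	return -1;				/* timed out */
}

static int fake_status_changed(void *arg)
{
	return --*(int *) arg <= 0;		/* "fires" on the 3rd poll */
}

int main(void)
{
	int countdown = 3;

	/* 10 tries x 10 ms mirrors the ~100 ms budget used above. */
	if (poll_until(fake_status_changed, &countdown, 10, 10 * 1000) == 0)
		printf("interrupt observed\n");
	else
		printf("no interrupt within timeout\n");
	return 0;
}
#endif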
4204
4205 static void
4206 bnx2_5706_serdes_timer(struct bnx2 *bp)
4207 {
4208 spin_lock(&bp->phy_lock);
4209 if (bp->serdes_an_pending)
4210 bp->serdes_an_pending--;
4211 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4212 u32 bmcr;
4213
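/* Link is down with autoneg enabled: look for a parallel-detect
 * partner.  If the PHY sees signal but no autoneg CONFIG words, the
 * peer is likely forced, so force this side to 1000/full to match.
 */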
4214 bp->current_interval = bp->timer_interval;
4215
4216 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4217
4218 if (bmcr & BMCR_ANENABLE) {
4219 u32 phy1, phy2;
4220
4221 bnx2_write_phy(bp, 0x1c, 0x7c00);
4222 bnx2_read_phy(bp, 0x1c, &phy1);
4223
4224 bnx2_write_phy(bp, 0x17, 0x0f01);
4225 bnx2_read_phy(bp, 0x15, &phy2);
4226 bnx2_write_phy(bp, 0x17, 0x0f01);
4227 bnx2_read_phy(bp, 0x15, &phy2);
4228
4229 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4230 !(phy2 & 0x20)) { /* no CONFIG */
4231
4232 bmcr &= ~BMCR_ANENABLE;
4233 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4234 bnx2_write_phy(bp, MII_BMCR, bmcr);
4235 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
4236 }
4237 }
4238 }
4239 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4240 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4241 u32 phy2;
4242
4243 bnx2_write_phy(bp, 0x17, 0x0f01);
4244 bnx2_read_phy(bp, 0x15, &phy2);
4245 if (phy2 & 0x20) {
4246 u32 bmcr;
4247
4248 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4249 bmcr |= BMCR_ANENABLE;
4250 bnx2_write_phy(bp, MII_BMCR, bmcr);
4251
4252 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4253 }
4254 } else
4255 bp->current_interval = bp->timer_interval;
4256
4257 spin_unlock(&bp->phy_lock);
4258 }
4259
4260 static void
4261 bnx2_5708_serdes_timer(struct bnx2 *bp)
4262 {
4263 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4264 bp->serdes_an_pending = 0;
4265 return;
4266 }
4267
4268 spin_lock(&bp->phy_lock);
4269 if (bp->serdes_an_pending)
4270 bp->serdes_an_pending--;
4271 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4272 u32 bmcr;
4273
4274 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4275
4276 if (bmcr & BMCR_ANENABLE) {
4277 bmcr &= ~BMCR_ANENABLE;
4278 bmcr |= BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500;
4279 bnx2_write_phy(bp, MII_BMCR, bmcr);
4280 bp->current_interval = SERDES_FORCED_TIMEOUT;
4281 } else {
4282 bmcr &= ~(BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500);
4283 bmcr |= BMCR_ANENABLE;
4284 bnx2_write_phy(bp, MII_BMCR, bmcr);
4285 bp->serdes_an_pending = 2;
4286 bp->current_interval = bp->timer_interval;
4287 }
4288
4289 } else
4290 bp->current_interval = bp->timer_interval;
4291
4292 spin_unlock(&bp->phy_lock);
4293 }
4294
4295 static void
4296 bnx2_timer(unsigned long data)
4297 {
4298 struct bnx2 *bp = (struct bnx2 *) data;
4299 u32 msg;
4300
4301 if (!netif_running(bp->dev))
4302 return;
4303
4304 if (atomic_read(&bp->intr_sem) != 0)
4305 goto bnx2_restart_timer;
4306
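/* Heartbeat: bump the driver-pulse sequence in shared memory on every
 * timer tick so the bootcode can tell the driver is still alive.
 */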
4307 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
4308 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
4309
4310 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4311
4312 if (bp->phy_flags & PHY_SERDES_FLAG) {
4313 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4314 bnx2_5706_serdes_timer(bp);
4315 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
4316 bnx2_5708_serdes_timer(bp);
4317 }
4318
4319 bnx2_restart_timer:
4320 mod_timer(&bp->timer, jiffies + bp->current_interval);
4321 }
4322
4323 /* Called with rtnl_lock */
4324 static int
4325 bnx2_open(struct net_device *dev)
4326 {
4327 struct bnx2 *bp = netdev_priv(dev);
4328 int rc;
4329
4330 bnx2_set_power_state(bp, PCI_D0);
4331 bnx2_disable_int(bp);
4332
4333 rc = bnx2_alloc_mem(bp);
4334 if (rc)
4335 return rc;
4336
4337 if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
4338 (CHIP_ID(bp) != CHIP_ID_5706_A1) &&
4339 !disable_msi) {
4340
4341 if (pci_enable_msi(bp->pdev) == 0) {
4342 bp->flags |= USING_MSI_FLAG;
4343 rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
4344 dev);
4345 }
4346 else {
4347 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4348 IRQF_SHARED, dev->name, dev);
4349 }
4350 }
4351 else {
4352 rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
4353 dev->name, dev);
4354 }
4355 if (rc) {
4356 bnx2_free_mem(bp);
4357 return rc;
4358 }
4359
4360 rc = bnx2_init_nic(bp);
4361
4362 if (rc) {
4363 free_irq(bp->pdev->irq, dev);
4364 if (bp->flags & USING_MSI_FLAG) {
4365 pci_disable_msi(bp->pdev);
4366 bp->flags &= ~USING_MSI_FLAG;
4367 }
4368 bnx2_free_skbs(bp);
4369 bnx2_free_mem(bp);
4370 return rc;
4371 }
4372
4373 mod_timer(&bp->timer, jiffies + bp->current_interval);
4374
4375 atomic_set(&bp->intr_sem, 0);
4376
4377 bnx2_enable_int(bp);
4378
4379 if (bp->flags & USING_MSI_FLAG) {
4380 /* Test MSI to make sure it is working.
4381 * If the MSI test fails, go back to INTx mode.
4382 */
4383 if (bnx2_test_intr(bp) != 0) {
4384 printk(KERN_WARNING PFX "%s: No interrupt was generated"
4385 " using MSI, switching to INTx mode. Please"
4386 " report this failure to the PCI maintainer"
4387 " and include system chipset information.\n",
4388 bp->dev->name);
4389
4390 bnx2_disable_int(bp);
4391 free_irq(bp->pdev->irq, dev);
4392 pci_disable_msi(bp->pdev);
4393 bp->flags &= ~USING_MSI_FLAG;
4394
4395 rc = bnx2_init_nic(bp);
4396
4397 if (!rc) {
4398 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4399 IRQF_SHARED, dev->name, dev);
4400 }
4401 if (rc) {
4402 bnx2_free_skbs(bp);
4403 bnx2_free_mem(bp);
4404 del_timer_sync(&bp->timer);
4405 return rc;
4406 }
4407 bnx2_enable_int(bp);
4408 }
4409 }
4410 if (bp->flags & USING_MSI_FLAG) {
4411 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
4412 }
4413
4414 netif_start_queue(dev);
4415
4416 return 0;
4417 }
4418
4419 static void
4420 bnx2_reset_task(struct work_struct *work)
4421 {
4422 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
4423
4424 if (!netif_running(bp->dev))
4425 return;
4426
4427 bp->in_reset_task = 1;
4428 bnx2_netif_stop(bp);
4429
4430 bnx2_init_nic(bp);
4431
4432 atomic_set(&bp->intr_sem, 1);
4433 bnx2_netif_start(bp);
4434 bp->in_reset_task = 0;
4435 }
4436
4437 static void
4438 bnx2_tx_timeout(struct net_device *dev)
4439 {
4440 struct bnx2 *bp = netdev_priv(dev);
4441
4442 /* This allows the netif to be shut down gracefully before resetting */
4443 schedule_work(&bp->reset_task);
4444 }
4445
4446 #ifdef BCM_VLAN
4447 /* Called with rtnl_lock */
4448 static void
4449 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4450 {
4451 struct bnx2 *bp = netdev_priv(dev);
4452
4453 bnx2_netif_stop(bp);
4454
4455 bp->vlgrp = vlgrp;
4456 bnx2_set_rx_mode(dev);
4457
4458 bnx2_netif_start(bp);
4459 }
4460
4461 /* Called with rtnl_lock */
4462 static void
4463 bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4464 {
4465 struct bnx2 *bp = netdev_priv(dev);
4466
4467 bnx2_netif_stop(bp);
4468 vlan_group_set_device(bp->vlgrp, vid, NULL);
4469 bnx2_set_rx_mode(dev);
4470
4471 bnx2_netif_start(bp);
4472 }
4473 #endif
4474
4475 /* Called with netif_tx_lock.
4476 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4477 * netif_wake_queue().
4478 */
4479 static int
4480 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4481 {
4482 struct bnx2 *bp = netdev_priv(dev);
4483 dma_addr_t mapping;
4484 struct tx_bd *txbd;
4485 struct sw_bd *tx_buf;
4486 u32 len, vlan_tag_flags, last_frag, mss;
4487 u16 prod, ring_prod;
4488 int i;
4489
4490 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
4491 netif_stop_queue(dev);
4492 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4493 dev->name);
4494
4495 return NETDEV_TX_BUSY;
4496 }
4497 len = skb_headlen(skb);
4498 prod = bp->tx_prod;
4499 ring_prod = TX_RING_IDX(prod);
4500
4501 vlan_tag_flags = 0;
4502 if (skb->ip_summed == CHECKSUM_PARTIAL) {
4503 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4504 }
4505
4506 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4507 vlan_tag_flags |=
4508 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4509 }
4510 if ((mss = skb_shinfo(skb)->gso_size) &&
4511 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4512 u32 tcp_opt_len, ip_tcp_len;
4513
4514 if (skb_header_cloned(skb) &&
4515 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4516 dev_kfree_skb(skb);
4517 return NETDEV_TX_OK;
4518 }
4519
4520 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4521
4522 /* Length of any TCP options beyond the 20-byte base header. */
4523 tcp_opt_len = 0;
4524 if (skb->h.th->doff > 5) {
4525 tcp_opt_len = (skb->h.th->doff - 5) << 2;
4526 }
4527 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4528
4529 skb->nh.iph->check = 0;
4530 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4531 skb->h.th->check =
4532 ~csum_tcpudp_magic(skb->nh.iph->saddr,
4533 skb->nh.iph->daddr,
4534 0, IPPROTO_TCP, 0);
4535
4536 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4537 vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4538 (tcp_opt_len >> 2)) << 8;
4539 }
4540 }
4541 else
4542 {
4543 mss = 0;
4544 }
4545
4546 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4547
4548 tx_buf = &bp->tx_buf_ring[ring_prod];
4549 tx_buf->skb = skb;
4550 pci_unmap_addr_set(tx_buf, mapping, mapping);
4551
4552 txbd = &bp->tx_desc_ring[ring_prod];
4553
4554 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4555 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4556 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4557 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4558
4559 last_frag = skb_shinfo(skb)->nr_frags;
4560
4561 for (i = 0; i < last_frag; i++) {
4562 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4563
4564 prod = NEXT_TX_BD(prod);
4565 ring_prod = TX_RING_IDX(prod);
4566 txbd = &bp->tx_desc_ring[ring_prod];
4567
4568 len = frag->size;
4569 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4570 len, PCI_DMA_TODEVICE);
4571 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4572 mapping, mapping);
4573
4574 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4575 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4576 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4577 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4578
4579 }
4580 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4581
4582 prod = NEXT_TX_BD(prod);
4583 bp->tx_prod_bseq += skb->len;
4584
4585 REG_WR16(bp, bp->tx_bidx_addr, prod);
4586 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4587
4588 mmiowb();
4589
4590 bp->tx_prod = prod;
4591 dev->trans_start = jiffies;
4592
4593 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
4594 netif_stop_queue(dev);
4595 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
4596 netif_wake_queue(dev);
4597 }
4598
4599 return NETDEV_TX_OK;
4600 }
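/* bnx2_start_xmit() above is a classic producer-side descriptor-ring
 * walk: the 16-bit producer index grows monotonically and is masked
 * into a ring slot, the first BD is flagged START, the last END, and
 * only the final doorbell write tells the chip new work exists.
 * Illustrative standalone sketch of that arithmetic, not driver code;
 * RING_SIZE, struct demo_bd and the flag values are made up, and the
 * real NEXT_TX_BD() additionally skips the last slot of each page.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define RING_SIZE 256			/* must be a power of two */
#define RING_IDX(x) ((x) & (RING_SIZE - 1))

struct demo_bd {
	uint32_t len;
	uint32_t flags;
};

static struct demo_bd ring[RING_SIZE];
static uint16_t prod;			/* wraps naturally at 65536 */

static void post_packet(const uint32_t *frag_len, int nfrags)
{
	int i;

	for (i = 0; i < nfrags; i++) {
		struct demo_bd *bd = &ring[RING_IDX(prod)];

		bd->len = frag_len[i];
		bd->flags = (i == 0) ? 0x1 : 0;		/* START */
		if (i == nfrags - 1)
			bd->flags |= 0x2;		/* END */
		prod++;
	}
	/* The real driver writes prod to the doorbell register here. */
}

int main(void)
{
	uint32_t frags[3] = { 1500, 4096, 60 };

	post_packet(frags, 3);
	printf("producer at %u (slot %u)\n",
	       (unsigned) prod, (unsigned) RING_IDX(prod));
	return 0;
}
#endif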
4601
4602 /* Called with rtnl_lock */
4603 static int
4604 bnx2_close(struct net_device *dev)
4605 {
4606 struct bnx2 *bp = netdev_priv(dev);
4607 u32 reset_code;
4608
4609 /* Calling flush_scheduled_work() may deadlock because
4610 * linkwatch_event() may be on the workqueue and it will try to get
4611 * the rtnl_lock which we are holding.
4612 */
4613 while (bp->in_reset_task)
4614 msleep(1);
4615
4616 bnx2_netif_stop(bp);
4617 del_timer_sync(&bp->timer);
4618 if (bp->flags & NO_WOL_FLAG)
4619 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
4620 else if (bp->wol)
4621 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4622 else
4623 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4624 bnx2_reset_chip(bp, reset_code);
4625 free_irq(bp->pdev->irq, dev);
4626 if (bp->flags & USING_MSI_FLAG) {
4627 pci_disable_msi(bp->pdev);
4628 bp->flags &= ~USING_MSI_FLAG;
4629 }
4630 bnx2_free_skbs(bp);
4631 bnx2_free_mem(bp);
4632 bp->link_up = 0;
4633 netif_carrier_off(bp->dev);
4634 bnx2_set_power_state(bp, PCI_D3hot);
4635 return 0;
4636 }
4637
4638 #define GET_NET_STATS64(ctr) \
4639 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
4640 (unsigned long) (ctr##_lo)
4641
4642 #define GET_NET_STATS32(ctr) \
4643 (ctr##_lo)
4644
4645 #if (BITS_PER_LONG == 64)
4646 #define GET_NET_STATS GET_NET_STATS64
4647 #else
4648 #define GET_NET_STATS GET_NET_STATS32
4649 #endif
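/* The macros above reassemble each hardware counter, exported as two
 * 32-bit words, into one value: 64-bit hosts use (hi << 32) + lo,
 * while 32-bit hosts keep only the low word and trade range for cheap
 * arithmetic.  Illustrative standalone sketch, not driver code:
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t ctr_hi = 0x00000002;	/* counter wrapped 32 bits twice */
	uint32_t ctr_lo = 0x80000000;
	uint64_t full = ((uint64_t) ctr_hi << 32) | ctr_lo;

	printf("hi=0x%08x lo=0x%08x -> %llu\n",
	       (unsigned) ctr_hi, (unsigned) ctr_lo,
	       (unsigned long long) full);
	return 0;
}
#endif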
4650
4651 static struct net_device_stats *
4652 bnx2_get_stats(struct net_device *dev)
4653 {
4654 struct bnx2 *bp = netdev_priv(dev);
4655 struct statistics_block *stats_blk = bp->stats_blk;
4656 struct net_device_stats *net_stats = &bp->net_stats;
4657
4658 if (bp->stats_blk == NULL) {
4659 return net_stats;
4660 }
4661 net_stats->rx_packets =
4662 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
4663 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
4664 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
4665
4666 net_stats->tx_packets =
4667 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
4668 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
4669 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
4670
4671 net_stats->rx_bytes =
4672 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
4673
4674 net_stats->tx_bytes =
4675 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
4676
4677 net_stats->multicast =
4678 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
4679
4680 net_stats->collisions =
4681 (unsigned long) stats_blk->stat_EtherStatsCollisions;
4682
4683 net_stats->rx_length_errors =
4684 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
4685 stats_blk->stat_EtherStatsOverrsizePkts);
4686
4687 net_stats->rx_over_errors =
4688 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
4689
4690 net_stats->rx_frame_errors =
4691 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
4692
4693 net_stats->rx_crc_errors =
4694 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
4695
4696 net_stats->rx_errors = net_stats->rx_length_errors +
4697 net_stats->rx_over_errors + net_stats->rx_frame_errors +
4698 net_stats->rx_crc_errors;
4699
4700 net_stats->tx_aborted_errors =
4701 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
4702 stats_blk->stat_Dot3StatsLateCollisions);
4703
4704 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
4705 (CHIP_ID(bp) == CHIP_ID_5708_A0))
4706 net_stats->tx_carrier_errors = 0;
4707 else {
4708 net_stats->tx_carrier_errors =
4709 (unsigned long)
4710 stats_blk->stat_Dot3StatsCarrierSenseErrors;
4711 }
4712
4713 net_stats->tx_errors =
4714 (unsigned long)
4715 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
4716 +
4717 net_stats->tx_aborted_errors +
4718 net_stats->tx_carrier_errors;
4719
4720 net_stats->rx_missed_errors =
4721 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
4722 stats_blk->stat_FwRxDrop);
4723
4724 return net_stats;
4725 }
4726
4727 /* All ethtool functions called with rtnl_lock */
4728
4729 static int
4730 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4731 {
4732 struct bnx2 *bp = netdev_priv(dev);
4733
4734 cmd->supported = SUPPORTED_Autoneg;
4735 if (bp->phy_flags & PHY_SERDES_FLAG) {
4736 cmd->supported |= SUPPORTED_1000baseT_Full |
4737 SUPPORTED_FIBRE;
4738
4739 cmd->port = PORT_FIBRE;
4740 }
4741 else {
4742 cmd->supported |= SUPPORTED_10baseT_Half |
4743 SUPPORTED_10baseT_Full |
4744 SUPPORTED_100baseT_Half |
4745 SUPPORTED_100baseT_Full |
4746 SUPPORTED_1000baseT_Full |
4747 SUPPORTED_TP;
4748
4749 cmd->port = PORT_TP;
4750 }
4751
4752 cmd->advertising = bp->advertising;
4753
4754 if (bp->autoneg & AUTONEG_SPEED) {
4755 cmd->autoneg = AUTONEG_ENABLE;
4756 }
4757 else {
4758 cmd->autoneg = AUTONEG_DISABLE;
4759 }
4760
4761 if (netif_carrier_ok(dev)) {
4762 cmd->speed = bp->line_speed;
4763 cmd->duplex = bp->duplex;
4764 }
4765 else {
4766 cmd->speed = -1;
4767 cmd->duplex = -1;
4768 }
4769
4770 cmd->transceiver = XCVR_INTERNAL;
4771 cmd->phy_address = bp->phy_addr;
4772
4773 return 0;
4774 }
4775
4776 static int
4777 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4778 {
4779 struct bnx2 *bp = netdev_priv(dev);
4780 u8 autoneg = bp->autoneg;
4781 u8 req_duplex = bp->req_duplex;
4782 u16 req_line_speed = bp->req_line_speed;
4783 u32 advertising = bp->advertising;
4784
4785 if (cmd->autoneg == AUTONEG_ENABLE) {
4786 autoneg |= AUTONEG_SPEED;
4787
4788 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
4789
4790 /* allow advertising one speed only */
4791 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
4792 (cmd->advertising == ADVERTISED_10baseT_Full) ||
4793 (cmd->advertising == ADVERTISED_100baseT_Half) ||
4794 (cmd->advertising == ADVERTISED_100baseT_Full)) {
4795
4796 if (bp->phy_flags & PHY_SERDES_FLAG)
4797 return -EINVAL;
4798
4799 advertising = cmd->advertising;
4800
4801 }
4802 else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
4803 advertising = cmd->advertising;
4804 }
4805 else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
4806 return -EINVAL;
4807 }
4808 else {
4809 if (bp->phy_flags & PHY_SERDES_FLAG) {
4810 advertising = ETHTOOL_ALL_FIBRE_SPEED;
4811 }
4812 else {
4813 advertising = ETHTOOL_ALL_COPPER_SPEED;
4814 }
4815 }
4816 advertising |= ADVERTISED_Autoneg;
4817 }
4818 else {
4819 if (bp->phy_flags & PHY_SERDES_FLAG) {
4820 if ((cmd->speed != SPEED_1000 &&
4821 cmd->speed != SPEED_2500) ||
4822 (cmd->duplex != DUPLEX_FULL))
4823 return -EINVAL;
4824
4825 if (cmd->speed == SPEED_2500 &&
4826 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
4827 return -EINVAL;
4828 }
4829 else if (cmd->speed == SPEED_1000) {
4830 return -EINVAL;
4831 }
4832 autoneg &= ~AUTONEG_SPEED;
4833 req_line_speed = cmd->speed;
4834 req_duplex = cmd->duplex;
4835 advertising = 0;
4836 }
4837
4838 bp->autoneg = autoneg;
4839 bp->advertising = advertising;
4840 bp->req_line_speed = req_line_speed;
4841 bp->req_duplex = req_duplex;
4842
4843 spin_lock_bh(&bp->phy_lock);
4844
4845 bnx2_setup_phy(bp);
4846
4847 spin_unlock_bh(&bp->phy_lock);
4848
4849 return 0;
4850 }
4851
4852 static void
4853 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4854 {
4855 struct bnx2 *bp = netdev_priv(dev);
4856
4857 strcpy(info->driver, DRV_MODULE_NAME);
4858 strcpy(info->version, DRV_MODULE_VERSION);
4859 strcpy(info->bus_info, pci_name(bp->pdev));
4860 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4861 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4862 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
4863 info->fw_version[1] = info->fw_version[3] = '.';
4864 info->fw_version[5] = 0;
4865 }
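/* bnx2_get_drvinfo() above renders bp->fw_ver, which packs the
 * major/minor/fix components one byte each in the top three bytes of a
 * 32-bit word, as a dotted "x.y.z" string.  Note the "+ '0'" trick
 * only produces a printable digit for component values 0-9.
 * Illustrative standalone sketch, not driver code; the sample value is
 * hypothetical:
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t fw_ver = 0x01020300;	/* would print as "1.2.3" */
	char s[6];

	s[0] = (char) (((fw_ver & 0xff000000) >> 24) + '0');
	s[1] = '.';
	s[2] = (char) (((fw_ver & 0x00ff0000) >> 16) + '0');
	s[3] = '.';
	s[4] = (char) (((fw_ver & 0x0000ff00) >> 8) + '0');
	s[5] = '\0';
	printf("fw_version = \"%s\"\n", s);
	return 0;
}
#endif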
4866
4867 #define BNX2_REGDUMP_LEN (32 * 1024)
4868
4869 static int
4870 bnx2_get_regs_len(struct net_device *dev)
4871 {
4872 return BNX2_REGDUMP_LEN;
4873 }
4874
4875 static void
4876 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4877 {
4878 u32 *p = _p, i, offset;
4879 u8 *orig_p = _p;
4880 struct bnx2 *bp = netdev_priv(dev);
4881 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4882 0x0800, 0x0880, 0x0c00, 0x0c10,
4883 0x0c30, 0x0d08, 0x1000, 0x101c,
4884 0x1040, 0x1048, 0x1080, 0x10a4,
4885 0x1400, 0x1490, 0x1498, 0x14f0,
4886 0x1500, 0x155c, 0x1580, 0x15dc,
4887 0x1600, 0x1658, 0x1680, 0x16d8,
4888 0x1800, 0x1820, 0x1840, 0x1854,
4889 0x1880, 0x1894, 0x1900, 0x1984,
4890 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4891 0x1c80, 0x1c94, 0x1d00, 0x1d84,
4892 0x2000, 0x2030, 0x23c0, 0x2400,
4893 0x2800, 0x2820, 0x2830, 0x2850,
4894 0x2b40, 0x2c10, 0x2fc0, 0x3058,
4895 0x3c00, 0x3c94, 0x4000, 0x4010,
4896 0x4080, 0x4090, 0x43c0, 0x4458,
4897 0x4c00, 0x4c18, 0x4c40, 0x4c54,
4898 0x4fc0, 0x5010, 0x53c0, 0x5444,
4899 0x5c00, 0x5c18, 0x5c80, 0x5c90,
4900 0x5fc0, 0x6000, 0x6400, 0x6428,
4901 0x6800, 0x6848, 0x684c, 0x6860,
4902 0x6888, 0x6910, 0x8000 };
4903
4904 regs->version = 0;
4905
4906 memset(p, 0, BNX2_REGDUMP_LEN);
4907
4908 if (!netif_running(bp->dev))
4909 return;
4910
4911 i = 0;
4912 offset = reg_boundaries[0];
4913 p += offset;
4914 while (offset < BNX2_REGDUMP_LEN) {
4915 *p++ = REG_RD(bp, offset);
4916 offset += 4;
4917 if (offset == reg_boundaries[i + 1]) {
4918 offset = reg_boundaries[i + 2];
4919 p = (u32 *) (orig_p + offset);
4920 i += 2;
4921 }
4922 }
4923 }
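/* bnx2_get_regs() above produces a fixed-layout dump from a sparse
 * register space: reg_boundaries[] is a flat list of [start, end)
 * pairs of readable windows, and everything between windows stays
 * zeroed so offsets in the dump always match chip offsets.
 * Illustrative standalone sketch of that walk, not driver code;
 * read_reg() and the window list are made up.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define DUMP_LEN 0x40

static uint32_t read_reg(uint32_t offset)
{
	return 0xdead0000u | offset;	/* fake register contents */
}

int main(void)
{
	/* Two readable windows, [0x00,0x10) and [0x20,0x28), with an
	 * end marker >= DUMP_LEN terminating the list.
	 */
	static const uint32_t bounds[] = { 0x00, 0x10, 0x20, 0x28, DUMP_LEN };
	uint32_t dump[DUMP_LEN / 4] = { 0 };
	uint32_t offset = bounds[0];
	unsigned int i = 0, j;

	while (offset < DUMP_LEN) {
		dump[offset / 4] = read_reg(offset);
		offset += 4;
		if (offset == bounds[i + 1]) {	/* end of this window */
			offset = bounds[i + 2];	/* jump over the hole */
			i += 2;
		}
	}
	for (j = 0; j < DUMP_LEN / 4; j++)
		printf("0x%02x: 0x%08x\n", j * 4, (unsigned) dump[j]);
	return 0;
}
#endif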
4924
4925 static void
4926 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4927 {
4928 struct bnx2 *bp = netdev_priv(dev);
4929
4930 if (bp->flags & NO_WOL_FLAG) {
4931 wol->supported = 0;
4932 wol->wolopts = 0;
4933 }
4934 else {
4935 wol->supported = WAKE_MAGIC;
4936 if (bp->wol)
4937 wol->wolopts = WAKE_MAGIC;
4938 else
4939 wol->wolopts = 0;
4940 }
4941 memset(&wol->sopass, 0, sizeof(wol->sopass));
4942 }
4943
4944 static int
4945 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4946 {
4947 struct bnx2 *bp = netdev_priv(dev);
4948
4949 if (wol->wolopts & ~WAKE_MAGIC)
4950 return -EINVAL;
4951
4952 if (wol->wolopts & WAKE_MAGIC) {
4953 if (bp->flags & NO_WOL_FLAG)
4954 return -EINVAL;
4955
4956 bp->wol = 1;
4957 }
4958 else {
4959 bp->wol = 0;
4960 }
4961 return 0;
4962 }
4963
4964 static int
4965 bnx2_nway_reset(struct net_device *dev)
4966 {
4967 struct bnx2 *bp = netdev_priv(dev);
4968 u32 bmcr;
4969
4970 if (!(bp->autoneg & AUTONEG_SPEED)) {
4971 return -EINVAL;
4972 }
4973
4974 spin_lock_bh(&bp->phy_lock);
4975
4976 /* Force a link down that is visible to the link partner */
4977 if (bp->phy_flags & PHY_SERDES_FLAG) {
4978 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
4979 spin_unlock_bh(&bp->phy_lock);
4980
4981 msleep(20);
4982
4983 spin_lock_bh(&bp->phy_lock);
4984
4985 bp->current_interval = SERDES_AN_TIMEOUT;
4986 bp->serdes_an_pending = 1;
4987 mod_timer(&bp->timer, jiffies + bp->current_interval);
4988 }
4989
4990 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4991 bmcr &= ~BMCR_LOOPBACK;
4992 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
4993
4994 spin_unlock_bh(&bp->phy_lock);
4995
4996 return 0;
4997 }
4998
4999 static int
5000 bnx2_get_eeprom_len(struct net_device *dev)
5001 {
5002 struct bnx2 *bp = netdev_priv(dev);
5003
5004 if (bp->flash_info == NULL)
5005 return 0;
5006
5007 return (int) bp->flash_size;
5008 }
5009
5010 static int
5011 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5012 u8 *eebuf)
5013 {
5014 struct bnx2 *bp = netdev_priv(dev);
5015 int rc;
5016
5017 /* parameters already validated in ethtool_get_eeprom */
5018
5019 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5020
5021 return rc;
5022 }
5023
5024 static int
5025 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5026 u8 *eebuf)
5027 {
5028 struct bnx2 *bp = netdev_priv(dev);
5029 int rc;
5030
5031 /* parameters already validated in ethtool_set_eeprom */
5032
5033 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5034
5035 return rc;
5036 }
5037
5038 static int
5039 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5040 {
5041 struct bnx2 *bp = netdev_priv(dev);
5042
5043 memset(coal, 0, sizeof(struct ethtool_coalesce));
5044
5045 coal->rx_coalesce_usecs = bp->rx_ticks;
5046 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5047 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5048 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5049
5050 coal->tx_coalesce_usecs = bp->tx_ticks;
5051 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5052 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5053 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5054
5055 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5056
5057 return 0;
5058 }
5059
5060 static int
5061 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5062 {
5063 struct bnx2 *bp = netdev_priv(dev);
5064
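/* Clamp each value to the width of its hardware field: tick counts
 * are 10-bit (0x3ff), frame counts 8-bit (0xff), and the stats tick
 * count must have its low byte clear.
 */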
5065 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5066 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5067
5068 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
5069 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5070
5071 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5072 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5073
5074 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5075 if (bp->rx_quick_cons_trip_int > 0xff)
5076 bp->rx_quick_cons_trip_int = 0xff;
5077
5078 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5079 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5080
5081 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5082 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5083
5084 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5085 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5086
5087 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5088 if (bp->tx_quick_cons_trip_int > 0xff)
5089 bp->tx_quick_cons_trip_int = 0xff;
5090
5091 bp->stats_ticks = coal->stats_block_coalesce_usecs;
5092 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5093 bp->stats_ticks &= 0xffff00;
5094
5095 if (netif_running(bp->dev)) {
5096 bnx2_netif_stop(bp);
5097 bnx2_init_nic(bp);
5098 bnx2_netif_start(bp);
5099 }
5100
5101 return 0;
5102 }
5103
5104 static void
5105 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5106 {
5107 struct bnx2 *bp = netdev_priv(dev);
5108
5109 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5110 ering->rx_mini_max_pending = 0;
5111 ering->rx_jumbo_max_pending = 0;
5112
5113 ering->rx_pending = bp->rx_ring_size;
5114 ering->rx_mini_pending = 0;
5115 ering->rx_jumbo_pending = 0;
5116
5117 ering->tx_max_pending = MAX_TX_DESC_CNT;
5118 ering->tx_pending = bp->tx_ring_size;
5119 }
5120
5121 static int
5122 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5123 {
5124 struct bnx2 *bp = netdev_priv(dev);
5125
5126 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
5127 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5128 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5129
5130 return -EINVAL;
5131 }
5132 if (netif_running(bp->dev)) {
5133 bnx2_netif_stop(bp);
5134 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5135 bnx2_free_skbs(bp);
5136 bnx2_free_mem(bp);
5137 }
5138
5139 bnx2_set_rx_ring_size(bp, ering->rx_pending);
5140 bp->tx_ring_size = ering->tx_pending;
5141
5142 if (netif_running(bp->dev)) {
5143 int rc;
5144
5145 rc = bnx2_alloc_mem(bp);
5146 if (rc)
5147 return rc;
5148 bnx2_init_nic(bp);
5149 bnx2_netif_start(bp);
5150 }
5151
5152 return 0;
5153 }
5154
5155 static void
5156 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5157 {
5158 struct bnx2 *bp = netdev_priv(dev);
5159
5160 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5161 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5162 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5163 }
5164
5165 static int
5166 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5167 {
5168 struct bnx2 *bp = netdev_priv(dev);
5169
5170 bp->req_flow_ctrl = 0;
5171 if (epause->rx_pause)
5172 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5173 if (epause->tx_pause)
5174 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5175
5176 if (epause->autoneg) {
5177 bp->autoneg |= AUTONEG_FLOW_CTRL;
5178 }
5179 else {
5180 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5181 }
5182
5183 spin_lock_bh(&bp->phy_lock);
5184
5185 bnx2_setup_phy(bp);
5186
5187 spin_unlock_bh(&bp->phy_lock);
5188
5189 return 0;
5190 }
5191
5192 static u32
5193 bnx2_get_rx_csum(struct net_device *dev)
5194 {
5195 struct bnx2 *bp = netdev_priv(dev);
5196
5197 return bp->rx_csum;
5198 }
5199
5200 static int
5201 bnx2_set_rx_csum(struct net_device *dev, u32 data)
5202 {
5203 struct bnx2 *bp = netdev_priv(dev);
5204
5205 bp->rx_csum = data;
5206 return 0;
5207 }
5208
5209 static int
5210 bnx2_set_tso(struct net_device *dev, u32 data)
5211 {
5212 if (data)
5213 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5214 else
5215 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
5216 return 0;
5217 }
5218
5219 #define BNX2_NUM_STATS 46
5220
5221 static struct {
5222 char string[ETH_GSTRING_LEN];
5223 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
5224 { "rx_bytes" },
5225 { "rx_error_bytes" },
5226 { "tx_bytes" },
5227 { "tx_error_bytes" },
5228 { "rx_ucast_packets" },
5229 { "rx_mcast_packets" },
5230 { "rx_bcast_packets" },
5231 { "tx_ucast_packets" },
5232 { "tx_mcast_packets" },
5233 { "tx_bcast_packets" },
5234 { "tx_mac_errors" },
5235 { "tx_carrier_errors" },
5236 { "rx_crc_errors" },
5237 { "rx_align_errors" },
5238 { "tx_single_collisions" },
5239 { "tx_multi_collisions" },
5240 { "tx_deferred" },
5241 { "tx_excess_collisions" },
5242 { "tx_late_collisions" },
5243 { "tx_total_collisions" },
5244 { "rx_fragments" },
5245 { "rx_jabbers" },
5246 { "rx_undersize_packets" },
5247 { "rx_oversize_packets" },
5248 { "rx_64_byte_packets" },
5249 { "rx_65_to_127_byte_packets" },
5250 { "rx_128_to_255_byte_packets" },
5251 { "rx_256_to_511_byte_packets" },
5252 { "rx_512_to_1023_byte_packets" },
5253 { "rx_1024_to_1522_byte_packets" },
5254 { "rx_1523_to_9022_byte_packets" },
5255 { "tx_64_byte_packets" },
5256 { "tx_65_to_127_byte_packets" },
5257 { "tx_128_to_255_byte_packets" },
5258 { "tx_256_to_511_byte_packets" },
5259 { "tx_512_to_1023_byte_packets" },
5260 { "tx_1024_to_1522_byte_packets" },
5261 { "tx_1523_to_9022_byte_packets" },
5262 { "rx_xon_frames" },
5263 { "rx_xoff_frames" },
5264 { "tx_xon_frames" },
5265 { "tx_xoff_frames" },
5266 { "rx_mac_ctrl_frames" },
5267 { "rx_filtered_packets" },
5268 { "rx_discards" },
5269 { "rx_fw_discards" },
5270 };
5271
5272 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
5273
5274 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
5275 STATS_OFFSET32(stat_IfHCInOctets_hi),
5276 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
5277 STATS_OFFSET32(stat_IfHCOutOctets_hi),
5278 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
5279 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
5280 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
5281 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
5282 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5283 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5284 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
5285 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
5286 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
5287 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
5288 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
5289 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
5290 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
5291 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
5292 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
5293 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
5294 STATS_OFFSET32(stat_EtherStatsCollisions),
5295 STATS_OFFSET32(stat_EtherStatsFragments),
5296 STATS_OFFSET32(stat_EtherStatsJabbers),
5297 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
5298 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
5299 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
5300 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
5301 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
5302 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
5303 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
5304 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
5305 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
5306 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
5307 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
5308 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
5309 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
5310 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
5311 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
5312 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
5313 STATS_OFFSET32(stat_XonPauseFramesReceived),
5314 STATS_OFFSET32(stat_XoffPauseFramesReceived),
5315 STATS_OFFSET32(stat_OutXonSent),
5316 STATS_OFFSET32(stat_OutXoffSent),
5317 STATS_OFFSET32(stat_MacControlFramesReceived),
5318 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
5319 STATS_OFFSET32(stat_IfInMBUFDiscards),
5320 STATS_OFFSET32(stat_FwRxDrop),
5321 };
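/* STATS_OFFSET32() above turns a field name into an index into a u32
 * view of the statistics block, so one offset table can drive all the
 * counter reads below.  Illustrative standalone sketch of the idiom,
 * not driver code; struct demo_stats is made up.
 */
#if 0
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct demo_stats {
	uint32_t rx_pkts_hi;
	uint32_t rx_pkts_lo;
	uint32_t crc_errors;
};

#define DEMO_OFFSET32(m) (offsetof(struct demo_stats, m) / 4)

int main(void)
{
	struct demo_stats st = { 0, 1234, 7 };
	const uint32_t *view = (const uint32_t *) &st;
	static const unsigned long offsets[] = {
		DEMO_OFFSET32(rx_pkts_hi),
		DEMO_OFFSET32(crc_errors),
	};

	printf("rx_pkts_lo = %u, crc_errors = %u\n",
	       (unsigned) view[offsets[0] + 1],	/* lo follows hi */
	       (unsigned) view[offsets[1]]);
	return 0;
}
#endif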
5322
5323 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
5324 * skipped because of errata.
5325 */
5326 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
5327 8,0,8,8,8,8,8,8,8,8,
5328 4,0,4,4,4,4,4,4,4,4,
5329 4,4,4,4,4,4,4,4,4,4,
5330 4,4,4,4,4,4,4,4,4,4,
5331 4,4,4,4,4,4,
5332 };
5333
5334 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
5335 8,0,8,8,8,8,8,8,8,8,
5336 4,4,4,4,4,4,4,4,4,4,
5337 4,4,4,4,4,4,4,4,4,4,
5338 4,4,4,4,4,4,4,4,4,4,
5339 4,4,4,4,4,4,
5340 };
5341
5342 #define BNX2_NUM_TESTS 6
5343
5344 static struct {
5345 char string[ETH_GSTRING_LEN];
5346 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
5347 { "register_test (offline)" },
5348 { "memory_test (offline)" },
5349 { "loopback_test (offline)" },
5350 { "nvram_test (online)" },
5351 { "interrupt_test (online)" },
5352 { "link_test (online)" },
5353 };
5354
5355 static int
5356 bnx2_self_test_count(struct net_device *dev)
5357 {
5358 return BNX2_NUM_TESTS;
5359 }
5360
5361 static void
5362 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
5363 {
5364 struct bnx2 *bp = netdev_priv(dev);
5365
5366 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
5367 if (etest->flags & ETH_TEST_FL_OFFLINE) {
5368 int i;
5369
5370 bnx2_netif_stop(bp);
5371 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
5372 bnx2_free_skbs(bp);
5373
5374 if (bnx2_test_registers(bp) != 0) {
5375 buf[0] = 1;
5376 etest->flags |= ETH_TEST_FL_FAILED;
5377 }
5378 if (bnx2_test_memory(bp) != 0) {
5379 buf[1] = 1;
5380 etest->flags |= ETH_TEST_FL_FAILED;
5381 }
5382 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
5383 etest->flags |= ETH_TEST_FL_FAILED;
5384
5385 if (!netif_running(bp->dev)) {
5386 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5387 }
5388 else {
5389 bnx2_init_nic(bp);
5390 bnx2_netif_start(bp);
5391 }
5392
5393 /* wait for link up */
5394 for (i = 0; i < 7; i++) {
5395 if (bp->link_up)
5396 break;
5397 msleep_interruptible(1000);
5398 }
5399 }
5400
5401 if (bnx2_test_nvram(bp) != 0) {
5402 buf[3] = 1;
5403 etest->flags |= ETH_TEST_FL_FAILED;
5404 }
5405 if (bnx2_test_intr(bp) != 0) {
5406 buf[4] = 1;
5407 etest->flags |= ETH_TEST_FL_FAILED;
5408 }
5409
5410 if (bnx2_test_link(bp) != 0) {
5411 buf[5] = 1;
5412 etest->flags |= ETH_TEST_FL_FAILED;
5413
5414 }
5415 }
5416
5417 static void
5418 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5419 {
5420 switch (stringset) {
5421 case ETH_SS_STATS:
5422 memcpy(buf, bnx2_stats_str_arr,
5423 sizeof(bnx2_stats_str_arr));
5424 break;
5425 case ETH_SS_TEST:
5426 memcpy(buf, bnx2_tests_str_arr,
5427 sizeof(bnx2_tests_str_arr));
5428 break;
5429 }
5430 }
5431
5432 static int
5433 bnx2_get_stats_count(struct net_device *dev)
5434 {
5435 return BNX2_NUM_STATS;
5436 }
5437
5438 static void
5439 bnx2_get_ethtool_stats(struct net_device *dev,
5440 struct ethtool_stats *stats, u64 *buf)
5441 {
5442 struct bnx2 *bp = netdev_priv(dev);
5443 int i;
5444 u32 *hw_stats = (u32 *) bp->stats_blk;
5445 u8 *stats_len_arr = NULL;
5446
5447 if (hw_stats == NULL) {
5448 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
5449 return;
5450 }
5451
5452 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
5453 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
5454 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
5455 (CHIP_ID(bp) == CHIP_ID_5708_A0))
5456 stats_len_arr = bnx2_5706_stats_len_arr;
5457 else
5458 stats_len_arr = bnx2_5708_stats_len_arr;
5459
5460 for (i = 0; i < BNX2_NUM_STATS; i++) {
5461 if (stats_len_arr[i] == 0) {
5462 /* skip this counter */
5463 buf[i] = 0;
5464 continue;
5465 }
5466 if (stats_len_arr[i] == 4) {
5467 /* 4-byte counter */
5468 buf[i] = (u64)
5469 *(hw_stats + bnx2_stats_offset_arr[i]);
5470 continue;
5471 }
5472 /* 8-byte counter */
5473 buf[i] = (((u64) *(hw_stats +
5474 bnx2_stats_offset_arr[i])) << 32) +
5475 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
5476 }
5477 }
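/* The loop above is table-driven: a per-counter length array decides
 * whether each statistic is skipped (0, errata), copied as a single
 * 32-bit word (4), or assembled from a hi/lo pair (8).  Illustrative
 * standalone sketch, not driver code; the three sample tables are
 * made up.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint32_t hw[] = { 0x1, 0x2, 0x99 };	/* hi, lo, 32-bit */
	const unsigned long off[] = { 0, 2, 2 };
	const uint8_t len[] = { 8, 4, 0 };		/* last: skipped */
	uint64_t out[3];
	int i;

	for (i = 0; i < 3; i++) {
		if (len[i] == 0)
			out[i] = 0;
		else if (len[i] == 4)
			out[i] = hw[off[i]];
		else
			out[i] = ((uint64_t) hw[off[i]] << 32) +
				 hw[off[i] + 1];
		printf("stat[%d] = %llu\n", i, (unsigned long long) out[i]);
	}
	return 0;
}
#endif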
5478
5479 static int
5480 bnx2_phys_id(struct net_device *dev, u32 data)
5481 {
5482 struct bnx2 *bp = netdev_priv(dev);
5483 int i;
5484 u32 save;
5485
5486 if (data == 0)
5487 data = 2;
5488
5489 save = REG_RD(bp, BNX2_MISC_CFG);
5490 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
5491
5492 for (i = 0; i < (data * 2); i++) {
5493 if ((i % 2) == 0) {
5494 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
5495 }
5496 else {
5497 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
5498 BNX2_EMAC_LED_1000MB_OVERRIDE |
5499 BNX2_EMAC_LED_100MB_OVERRIDE |
5500 BNX2_EMAC_LED_10MB_OVERRIDE |
5501 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
5502 BNX2_EMAC_LED_TRAFFIC);
5503 }
5504 msleep_interruptible(500);
5505 if (signal_pending(current))
5506 break;
5507 }
5508 REG_WR(bp, BNX2_EMAC_LED, 0);
5509 REG_WR(bp, BNX2_MISC_CFG, save);
5510 return 0;
5511 }
5512
5513 static const struct ethtool_ops bnx2_ethtool_ops = {
5514 .get_settings = bnx2_get_settings,
5515 .set_settings = bnx2_set_settings,
5516 .get_drvinfo = bnx2_get_drvinfo,
5517 .get_regs_len = bnx2_get_regs_len,
5518 .get_regs = bnx2_get_regs,
5519 .get_wol = bnx2_get_wol,
5520 .set_wol = bnx2_set_wol,
5521 .nway_reset = bnx2_nway_reset,
5522 .get_link = ethtool_op_get_link,
5523 .get_eeprom_len = bnx2_get_eeprom_len,
5524 .get_eeprom = bnx2_get_eeprom,
5525 .set_eeprom = bnx2_set_eeprom,
5526 .get_coalesce = bnx2_get_coalesce,
5527 .set_coalesce = bnx2_set_coalesce,
5528 .get_ringparam = bnx2_get_ringparam,
5529 .set_ringparam = bnx2_set_ringparam,
5530 .get_pauseparam = bnx2_get_pauseparam,
5531 .set_pauseparam = bnx2_set_pauseparam,
5532 .get_rx_csum = bnx2_get_rx_csum,
5533 .set_rx_csum = bnx2_set_rx_csum,
5534 .get_tx_csum = ethtool_op_get_tx_csum,
5535 .set_tx_csum = ethtool_op_set_tx_csum,
5536 .get_sg = ethtool_op_get_sg,
5537 .set_sg = ethtool_op_set_sg,
5538 .get_tso = ethtool_op_get_tso,
5539 .set_tso = bnx2_set_tso,
5540 .self_test_count = bnx2_self_test_count,
5541 .self_test = bnx2_self_test,
5542 .get_strings = bnx2_get_strings,
5543 .phys_id = bnx2_phys_id,
5544 .get_stats_count = bnx2_get_stats_count,
5545 .get_ethtool_stats = bnx2_get_ethtool_stats,
5546 .get_perm_addr = ethtool_op_get_perm_addr,
5547 };
5548
5549 /* Called with rtnl_lock */
5550 static int
5551 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5552 {
5553 struct mii_ioctl_data *data = if_mii(ifr);
5554 struct bnx2 *bp = netdev_priv(dev);
5555 int err;
5556
5557 switch(cmd) {
5558 case SIOCGMIIPHY:
5559 data->phy_id = bp->phy_addr;
5560
5561 /* fallthru */
5562 case SIOCGMIIREG: {
5563 u32 mii_regval;
5564
5565 spin_lock_bh(&bp->phy_lock);
5566 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
5567 spin_unlock_bh(&bp->phy_lock);
5568
5569 data->val_out = mii_regval;
5570
5571 return err;
5572 }
5573
5574 case SIOCSMIIREG:
5575 if (!capable(CAP_NET_ADMIN))
5576 return -EPERM;
5577
5578 spin_lock_bh(&bp->phy_lock);
5579 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
5580 spin_unlock_bh(&bp->phy_lock);
5581
5582 return err;
5583
5584 default:
5585 /* do nothing */
5586 break;
5587 }
5588 return -EOPNOTSUPP;
5589 }
5590
5591 /* Called with rtnl_lock */
5592 static int
5593 bnx2_change_mac_addr(struct net_device *dev, void *p)
5594 {
5595 struct sockaddr *addr = p;
5596 struct bnx2 *bp = netdev_priv(dev);
5597
5598 if (!is_valid_ether_addr(addr->sa_data))
5599 return -EINVAL;
5600
5601 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5602 if (netif_running(dev))
5603 bnx2_set_mac_addr(bp);
5604
5605 return 0;
5606 }
5607
5608 /* Called with rtnl_lock */
5609 static int
5610 bnx2_change_mtu(struct net_device *dev, int new_mtu)
5611 {
5612 struct bnx2 *bp = netdev_priv(dev);
5613
5614 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5615 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5616 return -EINVAL;
5617
5618 dev->mtu = new_mtu;
5619 if (netif_running(dev)) {
5620 bnx2_netif_stop(bp);
5621
5622 bnx2_init_nic(bp);
5623
5624 bnx2_netif_start(bp);
5625 }
5626 return 0;
5627 }
5628
5629 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5630 static void
5631 poll_bnx2(struct net_device *dev)
5632 {
5633 struct bnx2 *bp = netdev_priv(dev);
5634
5635 disable_irq(bp->pdev->irq);
5636 bnx2_interrupt(bp->pdev->irq, dev);
5637 enable_irq(bp->pdev->irq);
5638 }
5639 #endif
5640
5641 static void __devinit
5642 bnx2_get_5709_media(struct bnx2 *bp)
5643 {
5644 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
5645 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
5646 u32 strap;
5647
5648 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
5649 return;
5650 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
5651 bp->phy_flags |= PHY_SERDES_FLAG;
5652 return;
5653 }
5654
5655 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
5656 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
5657 else
5658 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
5659
5660 if (PCI_FUNC(bp->pdev->devfn) == 0) {
5661 switch (strap) {
5662 case 0x4:
5663 case 0x5:
5664 case 0x6:
5665 bp->phy_flags |= PHY_SERDES_FLAG;
5666 return;
5667 }
5668 } else {
5669 switch (strap) {
5670 case 0x1:
5671 case 0x2:
5672 case 0x4:
5673 bp->phy_flags |= PHY_SERDES_FLAG;
5674 return;
5675 }
5676 }
5677 }
5678
5679 static int __devinit
5680 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5681 {
5682 struct bnx2 *bp;
5683 unsigned long mem_len;
5684 int rc;
5685 u32 reg;
5686
5687 SET_MODULE_OWNER(dev);
5688 SET_NETDEV_DEV(dev, &pdev->dev);
5689 bp = netdev_priv(dev);
5690
5691 bp->flags = 0;
5692 bp->phy_flags = 0;
5693
5694 /* enable device (incl. PCI PM wakeup), and bus-mastering */
5695 rc = pci_enable_device(pdev);
5696 if (rc) {
5697 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
5698 goto err_out;
5699 }
5700
5701 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5702 dev_err(&pdev->dev,
5703 "Cannot find PCI device base address, aborting.\n");
5704 rc = -ENODEV;
5705 goto err_out_disable;
5706 }
5707
5708 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5709 if (rc) {
5710 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
5711 goto err_out_disable;
5712 }
5713
5714 pci_set_master(pdev);
5715
5716 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
5717 if (bp->pm_cap == 0) {
5718 dev_err(&pdev->dev,
5719 "Cannot find power management capability, aborting.\n");
5720 rc = -EIO;
5721 goto err_out_release;
5722 }
5723
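/* Prefer a 64-bit DMA mask (DAC) and remember the choice in
 * USING_DAC_FLAG so NETIF_F_HIGHDMA can be advertised later; otherwise
 * fall back to 32-bit, and abort the probe if even that fails.
 */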
5724 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
5725 bp->flags |= USING_DAC_FLAG;
5726 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
5727 dev_err(&pdev->dev,
5728 "pci_set_consistent_dma_mask failed, aborting.\n");
5729 rc = -EIO;
5730 goto err_out_release;
5731 }
5732 }
5733 else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
5734 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
5735 rc = -EIO;
5736 goto err_out_release;
5737 }
5738
5739 bp->dev = dev;
5740 bp->pdev = pdev;
5741
5742 spin_lock_init(&bp->phy_lock);
5743 INIT_WORK(&bp->reset_task, bnx2_reset_task);
5744
5745 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
5746 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
5747 dev->mem_end = dev->mem_start + mem_len;
5748 dev->irq = pdev->irq;
5749
5750 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
5751
5752 if (!bp->regview) {
5753 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
5754 rc = -ENOMEM;
5755 goto err_out_release;
5756 }
5757
5758 /* Configure byte swap and enable writes to the reg_window registers.
5759 * Rely on the CPU to do target byte swapping on big endian systems;
5760 * the chip's target access swapping will not swap all accesses.
5761 */
5762 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
5763 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5764 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5765
5766 bnx2_set_power_state(bp, PCI_D0);
5767
5768 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
5769
5770 if (CHIP_NUM(bp) != CHIP_NUM_5709) {
5771 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
5772 if (bp->pcix_cap == 0) {
5773 dev_err(&pdev->dev,
5774 "Cannot find PCIX capability, aborting.\n");
5775 rc = -EIO;
5776 goto err_out_unmap;
5777 }
5778 }
5779
5780 /* Get bus information. */
5781 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
5782 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
5783 u32 clkreg;
5784
5785 bp->flags |= PCIX_FLAG;
5786
5787 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
5788
5789 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
5790 switch (clkreg) {
5791 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
5792 bp->bus_speed_mhz = 133;
5793 break;
5794
5795 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
5796 bp->bus_speed_mhz = 100;
5797 break;
5798
5799 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
5800 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
5801 bp->bus_speed_mhz = 66;
5802 break;
5803
5804 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
5805 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
5806 bp->bus_speed_mhz = 50;
5807 break;
5808
5809 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
5810 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
5811 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
5812 bp->bus_speed_mhz = 33;
5813 break;
5814 }
5815 }
5816 else {
5817 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
5818 bp->bus_speed_mhz = 66;
5819 else
5820 bp->bus_speed_mhz = 33;
5821 }
5822
5823 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
5824 bp->flags |= PCI_32BIT_FLAG;
5825
5826 /* 5706A0 may falsely detect SERR and PERR. */
5827 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5828 reg = REG_RD(bp, PCI_COMMAND);
5829 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
5830 REG_WR(bp, PCI_COMMAND, reg);
5831 }
5832 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
5833 !(bp->flags & PCIX_FLAG)) {
5834 rc = -EPERM;
5835 dev_err(&pdev->dev,
5836 "5706 A1 can only be used in a PCIX bus, aborting.\n");
5837 goto err_out_unmap;
5838 }
5839
5840 bnx2_init_nvram(bp);
5841
5842 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
5843
5844 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
5845 BNX2_SHM_HDR_SIGNATURE_SIG) {
5846 u32 off = PCI_FUNC(pdev->devfn) << 2;
5847
5848 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
5849 } else
5850 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
5851
5852 /* Get the permanent MAC address. First we need to make sure the
5853 * firmware is actually running.
5854 */
5855 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
5856
5857 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
5858 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
5859 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
5860 rc = -ENODEV;
5861 goto err_out_unmap;
5862 }
5863
5864 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
5865
5866 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
5867 bp->mac_addr[0] = (u8) (reg >> 8);
5868 bp->mac_addr[1] = (u8) reg;
5869
5870 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
5871 bp->mac_addr[2] = (u8) (reg >> 24);
5872 bp->mac_addr[3] = (u8) (reg >> 16);
5873 bp->mac_addr[4] = (u8) (reg >> 8);
5874 bp->mac_addr[5] = (u8) reg;
5875
5876 bp->tx_ring_size = MAX_TX_DESC_CNT;
5877 bnx2_set_rx_ring_size(bp, 255);
5878
5879 bp->rx_csum = 1;
5880
5881 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
5882
5883 bp->tx_quick_cons_trip_int = 20;
5884 bp->tx_quick_cons_trip = 20;
5885 bp->tx_ticks_int = 80;
5886 bp->tx_ticks = 80;
5887
5888 bp->rx_quick_cons_trip_int = 6;
5889 bp->rx_quick_cons_trip = 6;
5890 bp->rx_ticks_int = 18;
5891 bp->rx_ticks = 18;
5892
5893 bp->stats_ticks = 1000000 & 0xffff00;
5894
5895 bp->timer_interval = HZ;
5896 bp->current_interval = HZ;
5897
5898 bp->phy_addr = 1;
5899
5900 /* Disable WOL support if we are running on a SERDES chip. */
5901 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5902 bnx2_get_5709_media(bp);
5903 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
5904 bp->phy_flags |= PHY_SERDES_FLAG;
5905
5906 if (bp->phy_flags & PHY_SERDES_FLAG) {
5907 bp->flags |= NO_WOL_FLAG;
5908 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
5909 bp->phy_addr = 2;
5910 reg = REG_RD_IND(bp, bp->shmem_base +
5911 BNX2_SHARED_HW_CFG_CONFIG);
5912 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
5913 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
5914 }
5915 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
5916 CHIP_NUM(bp) == CHIP_NUM_5708)
5917 bp->phy_flags |= PHY_CRC_FIX_FLAG;
5918 else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
5919 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
5920
5921 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
5922 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
5923 (CHIP_ID(bp) == CHIP_ID_5708_B1))
5924 bp->flags |= NO_WOL_FLAG;
5925
5926 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5927 bp->tx_quick_cons_trip_int =
5928 bp->tx_quick_cons_trip;
5929 bp->tx_ticks_int = bp->tx_ticks;
5930 bp->rx_quick_cons_trip_int =
5931 bp->rx_quick_cons_trip;
5932 bp->rx_ticks_int = bp->rx_ticks;
5933 bp->comp_prod_trip_int = bp->comp_prod_trip;
5934 bp->com_ticks_int = bp->com_ticks;
5935 bp->cmd_ticks_int = bp->cmd_ticks;
5936 }
5937
5938 /* Disable MSI on 5706 if AMD 8132 bridge is found.
5939 *
5940 * MSI is defined to be a 32-bit write. The 5706 does 64-bit MSI writes
5941 * with byte enables disabled on the unused 32-bit word. This is legal
5942 * but causes problems on the AMD 8132 which will eventually stop
5943 * responding after a while.
5944 *
5945 * AMD believes this incompatibility is unique to the 5706, and
5946 * prefers to locally disable MSI rather than globally disabling it.
5947 */
5948 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
5949 struct pci_dev *amd_8132 = NULL;
5950
5951 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
5952 PCI_DEVICE_ID_AMD_8132_BRIDGE,
5953 amd_8132))) {
5954 u8 rev;
5955
5956 pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
5957 if (rev >= 0x10 && rev <= 0x13) {
5958 disable_msi = 1;
5959 pci_dev_put(amd_8132);
5960 break;
5961 }
5962 }
5963 }
5964
5965 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
5966 bp->req_line_speed = 0;
5967 if (bp->phy_flags & PHY_SERDES_FLAG) {
5968 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
5969
5970 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
5971 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
5972 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
5973 bp->autoneg = 0;
5974 bp->req_line_speed = bp->line_speed = SPEED_1000;
5975 bp->req_duplex = DUPLEX_FULL;
5976 }
5977 }
5978 else {
5979 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
5980 }
5981
5982 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
5983
5984 init_timer(&bp->timer);
5985 bp->timer.expires = RUN_AT(bp->timer_interval);
5986 bp->timer.data = (unsigned long) bp;
5987 bp->timer.function = bnx2_timer;
5988
5989 return 0;
5990
5991 err_out_unmap:
5992 if (bp->regview) {
5993 iounmap(bp->regview);
5994 bp->regview = NULL;
5995 }
5996
5997 err_out_release:
5998 pci_release_regions(pdev);
5999
6000 err_out_disable:
6001 pci_disable_device(pdev);
6002 pci_set_drvdata(pdev, NULL);
6003
6004 err_out:
6005 return rc;
6006 }
6007
6008 static int __devinit
6009 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6010 {
6011 static int version_printed = 0;
6012 struct net_device *dev = NULL;
6013 struct bnx2 *bp;
6014 int rc, i;
6015
6016 if (version_printed++ == 0)
6017 printk(KERN_INFO "%s", version);
6018
6019 /* dev zeroed in alloc_etherdev */
6020 dev = alloc_etherdev(sizeof(*bp));
6021
6022 if (!dev)
6023 return -ENOMEM;
6024
6025 rc = bnx2_init_board(pdev, dev);
6026 if (rc < 0) {
6027 free_netdev(dev);
6028 return rc;
6029 }
6030
6031 dev->open = bnx2_open;
6032 dev->hard_start_xmit = bnx2_start_xmit;
6033 dev->stop = bnx2_close;
6034 dev->get_stats = bnx2_get_stats;
6035 dev->set_multicast_list = bnx2_set_rx_mode;
6036 dev->do_ioctl = bnx2_ioctl;
6037 dev->set_mac_address = bnx2_change_mac_addr;
6038 dev->change_mtu = bnx2_change_mtu;
6039 dev->tx_timeout = bnx2_tx_timeout;
6040 dev->watchdog_timeo = TX_TIMEOUT;
6041 #ifdef BCM_VLAN
6042 dev->vlan_rx_register = bnx2_vlan_rx_register;
6043 dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
6044 #endif
6045 dev->poll = bnx2_poll;
6046 dev->ethtool_ops = &bnx2_ethtool_ops;
6047 dev->weight = 64;
6048
6049 bp = netdev_priv(dev);
6050
6051 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6052 dev->poll_controller = poll_bnx2;
6053 #endif
6054
6055 if ((rc = register_netdev(dev))) {
6056 dev_err(&pdev->dev, "Cannot register net device\n");
6057 if (bp->regview)
6058 iounmap(bp->regview);
6059 pci_release_regions(pdev);
6060 pci_disable_device(pdev);
6061 pci_set_drvdata(pdev, NULL);
6062 free_netdev(dev);
6063 return rc;
6064 }
6065
6066 pci_set_drvdata(pdev, dev);
6067
6068 memcpy(dev->dev_addr, bp->mac_addr, 6);
6069 memcpy(dev->perm_addr, bp->mac_addr, 6);
6070 bp->name = board_info[ent->driver_data].name;
6071 printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
6072 "IRQ %d, ",
6073 dev->name,
6074 bp->name,
6075 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
6076 ((CHIP_ID(bp) & 0x0ff0) >> 4),
6077 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
6078 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
6079 bp->bus_speed_mhz,
6080 dev->base_addr,
6081 bp->pdev->irq);
6082
6083 printk("node addr ");
6084 for (i = 0; i < 6; i++)
6085 printk("%2.2x", dev->dev_addr[i]);
6086 printk("\n");
6087
6088 dev->features |= NETIF_F_SG;
6089 if (bp->flags & USING_DAC_FLAG)
6090 dev->features |= NETIF_F_HIGHDMA;
6091 dev->features |= NETIF_F_IP_CSUM;
6092 #ifdef BCM_VLAN
6093 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6094 #endif
6095 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6096
6097 netif_carrier_off(bp->dev);
6098
6099 return 0;
6100 }
6101
6102 static void __devexit
6103 bnx2_remove_one(struct pci_dev *pdev)
6104 {
6105 struct net_device *dev = pci_get_drvdata(pdev);
6106 struct bnx2 *bp = netdev_priv(dev);
6107
6108 flush_scheduled_work();
6109
6110 unregister_netdev(dev);
6111
6112 if (bp->regview)
6113 iounmap(bp->regview);
6114
6115 free_netdev(dev);
6116 pci_release_regions(pdev);
6117 pci_disable_device(pdev);
6118 pci_set_drvdata(pdev, NULL);
6119 }
6120
6121 static int
6122 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
6123 {
6124 struct net_device *dev = pci_get_drvdata(pdev);
6125 struct bnx2 *bp = netdev_priv(dev);
6126 u32 reset_code;
6127
6128 if (!netif_running(dev))
6129 return 0;
6130
6131 flush_scheduled_work();
6132 bnx2_netif_stop(bp);
6133 netif_device_detach(dev);
6134 del_timer_sync(&bp->timer);
6135 if (bp->flags & NO_WOL_FLAG)
6136 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
6137 else if (bp->wol)
6138 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6139 else
6140 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6141 bnx2_reset_chip(bp, reset_code);
6142 bnx2_free_skbs(bp);
6143 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
6144 return 0;
6145 }
6146
6147 static int
6148 bnx2_resume(struct pci_dev *pdev)
6149 {
6150 struct net_device *dev = pci_get_drvdata(pdev);
6151 struct bnx2 *bp = netdev_priv(dev);
6152
6153 if (!netif_running(dev))
6154 return 0;
6155
6156 bnx2_set_power_state(bp, PCI_D0);
6157 netif_device_attach(dev);
6158 bnx2_init_nic(bp);
6159 bnx2_netif_start(bp);
6160 return 0;
6161 }
6162
6163 static struct pci_driver bnx2_pci_driver = {
6164 .name = DRV_MODULE_NAME,
6165 .id_table = bnx2_pci_tbl,
6166 .probe = bnx2_init_one,
6167 .remove = __devexit_p(bnx2_remove_one),
6168 .suspend = bnx2_suspend,
6169 .resume = bnx2_resume,
6170 };
6171
6172 static int __init bnx2_init(void)
6173 {
6174 return pci_register_driver(&bnx2_pci_driver);
6175 }
6176
6177 static void __exit bnx2_cleanup(void)
6178 {
6179 pci_unregister_driver(&bnx2_pci_driver);
6180 }
6181
6182 module_init(bnx2_init);
6183 module_exit(bnx2_cleanup);
6184