]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/net/bnx2.c
[BNX2]: Fix bug when rx ring is full
[mirror_ubuntu-bionic-kernel.git] / drivers / net / bnx2.c
CommitLineData
b6016b76
MC
1/* bnx2.c: Broadcom NX2 network driver.
2 *
206cc83c 3 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
b6016b76
MC
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
12#include "bnx2.h"
13#include "bnx2_fw.h"
14
/* Driver identity and module-level parameters. */
#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.4.31"
#define DRV_MODULE_RELDATE	"January 19, 2006"

/* Convert a relative delay into an absolute jiffies deadline. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module option: set non-zero to force legacy INTx instead of MSI. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
37
/* Board identifiers; used as driver_data in bnx2_pci_tbl and as the
 * index into board_info[], so the order here must match that table.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
} board_t;
47
/* indexed by board_t, above */
static const struct {
	char *name;	/* human-readable board name, printed at probe */
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	};
60
/* PCI devices this driver binds to.  The last field (driver_data) is
 * a board_t index into board_info[].  HP OEM entries with specific
 * subsystem IDs are listed before the PCI_ANY_ID catch-alls so they
 * match first.
 */
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ 0, }
};
78
/* Supported NVRAM parts.  Each entry describes one flash/EEPROM type
 * (strapping value, config registers, page geometry, size and name --
 * see struct flash_spec in bnx2.h for the exact field meanings).
 */
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
167
e89bbf10
MC
/* Return the number of tx descriptors currently available.
 *
 * tx_prod and tx_cons are free-running counters, so the masked ring
 * indices can wrap relative to each other; a raw difference larger
 * than MAX_TX_DESC_CNT is folded back into ring range.  (The extra -1
 * in the wrap case presumably accounts for the unusable next-page
 * descriptor entry -- confirm against the ring layout in bnx2.h.)
 */
static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
	u32 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);

	if (diff > MAX_TX_DESC_CNT)
		diff = (diff & MAX_TX_DESC_CNT) - 1;
	return (bp->tx_ring_size - diff);
}
176
b6016b76
MC
177static u32
178bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
179{
180 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
181 return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
182}
183
/* Indirect register write: latch the target offset into the PCICFG
 * window address register, then write the value through the window.
 * The two writes must stay in this order.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
}
190
191static void
192bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
193{
194 offset += cid_addr;
195 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
196 REG_WR(bp, BNX2_CTX_DATA, val);
197}
198
/* Read a PHY register over the EMAC MDIO interface.
 *
 * If the chip is auto-polling the PHY, polling is suspended around the
 * manual access and restored afterwards.  On success returns 0 with
 * the register contents in *val; returns -EBUSY (and *val = 0) if the
 * MDIO transaction does not complete within the poll window.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Suspend hardware auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush posted write */

		udelay(40);
	}

	/* Launch the read: PHY address, register, START_BUSY. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to 50 x 10us for START_BUSY to clear. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Re-read and keep only the data bits. */
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore hardware auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
255
/* Write a PHY register over the EMAC MDIO interface.
 *
 * As in bnx2_read_phy(), hardware auto-polling is suspended around the
 * manual access.  Returns 0 on success or -EBUSY if the transaction
 * does not complete within the poll window.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Suspend hardware auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush posted write */

		udelay(40);
	}

	/* Launch the write: PHY address, register, data, START_BUSY. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to 50 x 10us for START_BUSY to clear. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore hardware auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
304
/* Mask chip interrupts.  The read-back flushes the posted MMIO write
 * so the mask is guaranteed to have reached the device on return.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
312
/* Unmask chip interrupts and kick the host coalescing engine.
 *
 * The status index is acknowledged first with interrupts still
 * masked, then again with the mask cleared; finally COAL_NOW forces
 * a coalescing pass so any already-pending event fires an interrupt
 * immediately instead of waiting for the next timer tick.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	val = REG_RD(bp, BNX2_HC_COMMAND);
	REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW);
}
328
/* Disable interrupts and wait for any in-flight handler to finish.
 * intr_sem is incremented first; the matching decrement happens in
 * bnx2_netif_start(), which only re-enables interrupts when the
 * count returns to zero.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
336
/* Quiesce the interface: mask interrupts, then stop polling and the
 * tx queue if the device is up.  trans_start is refreshed so the
 * stopped queue does not trip the tx watchdog.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		netif_poll_disable(bp->dev);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}
347
348static void
349bnx2_netif_start(struct bnx2 *bp)
350{
351 if (atomic_dec_and_test(&bp->intr_sem)) {
352 if (netif_running(bp->dev)) {
353 netif_wake_queue(bp->dev);
354 netif_poll_enable(bp->dev);
355 bnx2_enable_int(bp);
356 }
357 }
358}
359
/* Release all host memory allocated by bnx2_alloc_mem().  Safe to
 * call on a partially-allocated state: each DMA buffer is checked
 * before freeing, kfree(NULL) is a no-op, and every pointer is
 * cleared so a second call is harmless.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	if (bp->stats_blk) {
		pci_free_consistent(bp->pdev, sizeof(struct statistics_block),
				    bp->stats_blk, bp->stats_blk_mapping);
		bp->stats_blk = NULL;
	}
	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, sizeof(struct status_block),
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	if (bp->rx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct rx_bd) * RX_DESC_CNT,
				    bp->rx_desc_ring, bp->rx_desc_mapping);
		bp->rx_desc_ring = NULL;
	}
	kfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
}
390
/* Allocate all host memory used by the device: the tx/rx software
 * shadow rings (kmalloc), the tx/rx descriptor rings and the status
 * and statistics blocks (DMA-coherent).  On any failure, everything
 * allocated so far is released via bnx2_free_mem() and -ENOMEM is
 * returned.
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	bp->tx_buf_ring = kmalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	memset(bp->tx_buf_ring, 0, sizeof(struct sw_bd) * TX_DESC_CNT);
	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
					        sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	bp->rx_buf_ring = kmalloc(sizeof(struct sw_bd) * RX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT);
	bp->rx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct rx_bd) *
						RX_DESC_CNT,
						&bp->rx_desc_mapping);
	if (bp->rx_desc_ring == NULL)
		goto alloc_mem_err;

	bp->status_blk = pci_alloc_consistent(bp->pdev,
					      sizeof(struct status_block),
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, sizeof(struct status_block));

	bp->stats_blk = pci_alloc_consistent(bp->pdev,
					     sizeof(struct statistics_block),
					     &bp->stats_blk_mapping);
	if (bp->stats_blk == NULL)
		goto alloc_mem_err;

	memset(bp->stats_blk, 0, sizeof(struct statistics_block));

	return 0;

alloc_mem_err:
	/* bnx2_free_mem() tolerates the partially-allocated state. */
	bnx2_free_mem(bp);
	return -ENOMEM;
}
442
e3648b3d
MC
443static void
444bnx2_report_fw_link(struct bnx2 *bp)
445{
446 u32 fw_link_status = 0;
447
448 if (bp->link_up) {
449 u32 bmsr;
450
451 switch (bp->line_speed) {
452 case SPEED_10:
453 if (bp->duplex == DUPLEX_HALF)
454 fw_link_status = BNX2_LINK_STATUS_10HALF;
455 else
456 fw_link_status = BNX2_LINK_STATUS_10FULL;
457 break;
458 case SPEED_100:
459 if (bp->duplex == DUPLEX_HALF)
460 fw_link_status = BNX2_LINK_STATUS_100HALF;
461 else
462 fw_link_status = BNX2_LINK_STATUS_100FULL;
463 break;
464 case SPEED_1000:
465 if (bp->duplex == DUPLEX_HALF)
466 fw_link_status = BNX2_LINK_STATUS_1000HALF;
467 else
468 fw_link_status = BNX2_LINK_STATUS_1000FULL;
469 break;
470 case SPEED_2500:
471 if (bp->duplex == DUPLEX_HALF)
472 fw_link_status = BNX2_LINK_STATUS_2500HALF;
473 else
474 fw_link_status = BNX2_LINK_STATUS_2500FULL;
475 break;
476 }
477
478 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
479
480 if (bp->autoneg) {
481 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
482
483 bnx2_read_phy(bp, MII_BMSR, &bmsr);
484 bnx2_read_phy(bp, MII_BMSR, &bmsr);
485
486 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
487 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
488 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
489 else
490 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
491 }
492 }
493 else
494 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
495
496 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
497}
498
b6016b76
MC
/* Log the new link state and forward it to the firmware.
 *
 * The printk calls without a level are continuations that build one
 * log line started by the leveled printk, so their order and exact
 * strings matter.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}

	bnx2_report_fw_link(bp);
}
533
/* Derive bp->flow_ctrl from the negotiated (or forced) link settings.
 *
 * If either speed or flow-control autonegotiation is off, the user's
 * requested setting is applied directly (full duplex only).  When
 * both are autonegotiated, pause is resolved from the advertisement
 * registers per the 802.3 resolution table.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause is only meaningful on full-duplex links. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* The 5708 SerDes PHY reports the resolved pause state directly. */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
	bnx2_read_phy(bp, MII_LPA, &remote_adv);

	/* Map the 1000Base-X pause bits onto the copper-style pause
	 * bits so the resolution table below can be shared.
	 */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
609
610static int
5b0c76ad
MC
611bnx2_5708s_linkup(struct bnx2 *bp)
612{
613 u32 val;
614
615 bp->link_up = 1;
616 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
617 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
618 case BCM5708S_1000X_STAT1_SPEED_10:
619 bp->line_speed = SPEED_10;
620 break;
621 case BCM5708S_1000X_STAT1_SPEED_100:
622 bp->line_speed = SPEED_100;
623 break;
624 case BCM5708S_1000X_STAT1_SPEED_1G:
625 bp->line_speed = SPEED_1000;
626 break;
627 case BCM5708S_1000X_STAT1_SPEED_2G5:
628 bp->line_speed = SPEED_2500;
629 break;
630 }
631 if (val & BCM5708S_1000X_STAT1_FD)
632 bp->duplex = DUPLEX_FULL;
633 else
634 bp->duplex = DUPLEX_HALF;
635
636 return 0;
637}
638
639static int
640bnx2_5706s_linkup(struct bnx2 *bp)
b6016b76
MC
641{
642 u32 bmcr, local_adv, remote_adv, common;
643
644 bp->link_up = 1;
645 bp->line_speed = SPEED_1000;
646
647 bnx2_read_phy(bp, MII_BMCR, &bmcr);
648 if (bmcr & BMCR_FULLDPLX) {
649 bp->duplex = DUPLEX_FULL;
650 }
651 else {
652 bp->duplex = DUPLEX_HALF;
653 }
654
655 if (!(bmcr & BMCR_ANENABLE)) {
656 return 0;
657 }
658
659 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
660 bnx2_read_phy(bp, MII_LPA, &remote_adv);
661
662 common = local_adv & remote_adv;
663 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
664
665 if (common & ADVERTISE_1000XFULL) {
666 bp->duplex = DUPLEX_FULL;
667 }
668 else {
669 bp->duplex = DUPLEX_HALF;
670 }
671 }
672
673 return 0;
674}
675
/* Resolve speed and duplex for a copper PHY after link-up.
 *
 * With autoneg enabled, the highest common capability wins: gigabit
 * first (STAT1000 link-partner bits sit two positions above the
 * CTRL1000 advertisement bits, hence the >> 2), then 100, then 10.
 * With autoneg off, speed/duplex are taken directly from BMCR.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* Align partner's gigabit bits with our advertisement. */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
			bnx2_read_phy(bp, MII_LPA, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* No common capability: treat as down. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
741
/* Program the EMAC to match the resolved link parameters (port mode,
 * duplex, rx/tx pause) and acknowledge the link-change interrupt.
 *
 * NOTE(review): the TX_LENGTHS values 0x2620/0x26ff look like slot
 * time / IPG tuning for half-duplex gigabit -- confirm against the
 * chip documentation.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (CHIP_NUM(bp) == CHIP_NUM_5708) {
					val |= BNX2_EMAC_MODE_PORT_MII_10;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
808
/* Re-evaluate link state from the PHY and program the MAC to match.
 * Reports the change (log + firmware) if the state differs from the
 * previous one.  In MAC loopback mode the link is simply forced up.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	link_up = bp->link_up;

	/* BMSR link status is latched; read twice to get the current
	 * state (standard MII idiom).
	 */
	bnx2_read_phy(bp, MII_BMSR, &bmsr);
	bnx2_read_phy(bp, MII_BMSR, &bmsr);

	/* On 5706 SerDes, override BMSR link status with the EMAC link
	 * indication.  (NOTE(review): rationale not visible here --
	 * presumably a chip-specific quirk; confirm.)
	 */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link lost: if SerDes speed autoneg is configured,
		 * make sure autoneg is re-enabled so the link can
		 * come back up.
		 */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
			(bp->autoneg & AUTONEG_SPEED)) {

			u32 bmcr;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			if (!(bmcr & BMCR_ANENABLE)) {
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANENABLE);
			}
		}
		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
874
875static int
876bnx2_reset_phy(struct bnx2 *bp)
877{
878 int i;
879 u32 reg;
880
881 bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
882
883#define PHY_RESET_MAX_WAIT 100
884 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
885 udelay(10);
886
887 bnx2_read_phy(bp, MII_BMCR, &reg);
888 if (!(reg & BMCR_RESET)) {
889 udelay(20);
890 break;
891 }
892 }
893 if (i == PHY_RESET_MAX_WAIT) {
894 return -EBUSY;
895 }
896 return 0;
897}
898
899static u32
900bnx2_phy_get_pause_adv(struct bnx2 *bp)
901{
902 u32 adv = 0;
903
904 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
905 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
906
907 if (bp->phy_flags & PHY_SERDES_FLAG) {
908 adv = ADVERTISE_1000XPAUSE;
909 }
910 else {
911 adv = ADVERTISE_PAUSE_CAP;
912 }
913 }
914 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
915 if (bp->phy_flags & PHY_SERDES_FLAG) {
916 adv = ADVERTISE_1000XPSE_ASYM;
917 }
918 else {
919 adv = ADVERTISE_PAUSE_ASYM;
920 }
921 }
922 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
923 if (bp->phy_flags & PHY_SERDES_FLAG) {
924 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
925 }
926 else {
927 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
928 }
929 }
930 return adv;
931}
932
/* Configure the SerDes PHY for the requested forced or autonegotiated
 * mode.  In forced mode, 2.5G is first disabled on 5708 and the link
 * is bounced if anything changed so the partner sees the transition.
 * In autoneg mode, the advertisement is reprogrammed and autoneg is
 * restarted when it differs from the current one.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr, up1;
	u32 new_adv = 0;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex. */
		u32 new_bmcr;
		int force_link_down = 0;

		if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			/* Forced mode cannot use 2.5G; drop the link if
			 * it was enabled so the change takes effect.
			 */
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (up1 & BCM5708S_UP1_2G5) {
				up1 &= ~BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		}

		bnx2_read_phy(bp, MII_ADVERTISE, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, MII_BMCR, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;
		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, MII_ADVERTISE, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, MII_BMCR, new_bmcr);
			}
			bnx2_write_phy(bp, MII_ADVERTISE, adv);
			bnx2_write_phy(bp, MII_BMCR, new_bmcr);
		}
		return 0;
	}

	/* Autonegotiated mode: allow 2.5G where the PHY supports it. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &up1);
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, up1);
	}

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, MII_ADVERTISE, &adv);
	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			int i;

			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 110; i++) {
				udelay(100);
			}
		}

		bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
		bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
			/* Speed up link-up time when the link partner
			 * does not autonegotiate which is very common
			 * in blade servers. Some blade servers use
			 * IPMI for keyboard input and it's important
			 * to minimize link disruptions. Autoneg. involves
			 * exchanging base pages plus 3 next pages and
			 * normally completes in about 120 msec.
			 */
			bp->current_interval = SERDES_AN_TIMEOUT;
			bp->serdes_an_pending = 1;
			mod_timer(&bp->timer, jiffies + bp->current_interval);
		}
	}

	return 0;
}
1031
/* Advertisement masks: ETHTOOL_ALL_* use the ethtool ADVERTISED_*
 * encoding; PHY_ALL_* use the MII register ADVERTISE_* encoding.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1044
/* Configure the copper PHY for the requested mode.
 *
 * Autoneg: rebuild the advertisement registers from bp->advertising
 * and restart autoneg only if something actually changed.  Forced:
 * program BMCR directly, bouncing the link first so the partner
 * notices, and resolve flow control immediately if the link stayed up.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Keep non-speed bits, rebuild the speed/pause bits. */
		bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;
		int i = 0;

		/* BMSR is latched; read twice for current state. */
		bnx2_read_phy(bp, MII_BMSR, &bmsr);
		bnx2_read_phy(bp, MII_BMSR, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			do {
				udelay(100);
				bnx2_read_phy(bp, MII_BMSR, &bmsr);
				bnx2_read_phy(bp, MII_BMSR, &bmsr);
				i++;
			} while ((bmsr & BMSR_LSTATUS) && (i < 620));
		}

		bnx2_write_phy(bp, MII_BMCR, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	}
	return 0;
}
1139
1140static int
1141bnx2_setup_phy(struct bnx2 *bp)
1142{
1143 if (bp->loopback == MAC_LOOPBACK)
1144 return 0;
1145
1146 if (bp->phy_flags & PHY_SERDES_FLAG) {
1147 return (bnx2_setup_serdes_phy(bp));
1148 }
1149 else {
1150 return (bnx2_setup_copper_phy(bp));
1151 }
1152}
1153
/* One-time initialization of the 5708 SerDes PHY: select IEEE
 * signaling, enable fiber mode with auto-detect, enable parallel
 * detect, optionally enable 2.5G, and apply board-specific TX
 * amplitude workarounds.  The PHY registers are banked; BLK_ADDR
 * selects the active bank, so the write ordering matters.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	/* Workaround for early 5708 steppings. */
	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply a board-specific TX control value from shared memory,
	 * but only on backplane designs.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1207
/* One-time initialization of the 5706 SerDes PHY.
 *
 * Clears the parallel-detect flag, applies a 5706-specific MISC
 * register write, and programs undocumented shadow registers (0x18,
 * 0x1c) depending on whether jumbo frames are in use: MTU > 1500 sets
 * the extended packet length bit, otherwise it is cleared.  The exact
 * semantics of the 0x1c values are vendor magic -- do not alter them.
 * Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	/* NOTE(review): vendor-specified MISC setting for the 5706;
	 * the meaning of 0x300 is not documented here. */
	if (CHIP_NUM(bp) == CHIP_NUM_5706) {
		REG_WR(bp, BNX2_MISC_UNUSED0, 0x300);
	}

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1243
/* One-time initialization of the copper (twisted-pair) PHY.
 *
 * Applies a CRC-fix register sequence, sets or clears the extended
 * packet length bit depending on MTU, and enables the
 * ethernet@wirespeed feature (auto-downgrade to a lower speed when the
 * cable cannot sustain 1000 Mb).  Register 0x18/0x17/0x15 accesses are
 * vendor shadow-register sequences.  Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	/* NOTE(review): the flag is set unconditionally just above, so
	 * the conditional below always fires; it is kept as a toggle
	 * point should the workaround ever become chip-specific. */
	bp->phy_flags |= PHY_CRC_FIX_FLAG;

	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		/* Vendor-magic shadow register sequence (CRC fix). */
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Standard MTU: clear the extended length bits. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1286
1287
/* Top-level PHY initialization, called during device bring-up.
 *
 * Selects link-ready interrupt mode, enables link attention in the
 * EMAC, resets the PHY, reads and caches the 32-bit PHY ID from the
 * MII PHYSID registers, then dispatches to the chip-specific init
 * (5706/5708 SerDes or copper) and finally programs the link settings
 * via bnx2_setup_phy().
 * Returns the error code from the chip-specific init, 0 otherwise.
 * Caller is expected to hold the appropriate PHY serialization.
 */
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	/* Interrupt on "link ready" rather than other PHY int modes. */
	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	bnx2_reset_phy(bp);

	/* Assemble the 32-bit PHY id: PHYSID1 in the high half,
	 * PHYSID2 in the low half. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

	/* Note: setup errors are not propagated; rc reflects init only. */
	bnx2_setup_phy(bp);

	return rc;
}
1320
1321static int
1322bnx2_set_mac_loopback(struct bnx2 *bp)
1323{
1324 u32 mac_mode;
1325
1326 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1327 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1328 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1329 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1330 bp->link_up = 1;
1331 return 0;
1332}
1333
bc5a0690
MC
1334static int bnx2_test_link(struct bnx2 *);
1335
/* Put the PHY into loopback at 1000 Mb full duplex for self-test.
 *
 * Writes BMCR under phy_lock, waits (bounded poll) for the link to
 * come up, then reprograms the EMAC mode for GMII with all loopback /
 * forced-link / speed-override bits cleared so frames flow through the
 * PHY's loopback path.  Returns the bnx2_write_phy() error, else 0.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Give the link up to 10 polls to report ready; a timeout is
	 * not fatal -- the test itself will catch a dead link. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		udelay(10);
	}

	/* Select plain GMII operation with no MAC-level loopback. */
	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
1365
/* Send a message to the bootcode firmware through the shared-memory
 * driver mailbox and wait for the acknowledgement.
 *
 * @msg_data: message code/data; a fresh sequence number is OR'd in.
 * @silent:   suppress the timeout printk when nonzero.
 *
 * Polls the firmware mailbox for up to FW_ACK_TIME_OUT_MS.  WAIT0
 * messages return success without requiring an ack.  On timeout the
 * firmware is informed via a FW_TIMEOUT message and -EBUSY is
 * returned; a non-OK firmware status returns -EIO.  Sleeps (msleep),
 * so must be called from process context.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	/* Each message carries a new sequence number so the ack can be
	 * matched to this request. */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages are fire-and-forget: no ack check needed. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
			       "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
1408
1409static void
1410bnx2_init_context(struct bnx2 *bp)
1411{
1412 u32 vcid;
1413
1414 vcid = 96;
1415 while (vcid) {
1416 u32 vcid_addr, pcid_addr, offset;
1417
1418 vcid--;
1419
1420 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1421 u32 new_vcid;
1422
1423 vcid_addr = GET_PCID_ADDR(vcid);
1424 if (vcid & 0x8) {
1425 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1426 }
1427 else {
1428 new_vcid = vcid;
1429 }
1430 pcid_addr = GET_PCID_ADDR(new_vcid);
1431 }
1432 else {
1433 vcid_addr = GET_CID_ADDR(vcid);
1434 pcid_addr = vcid_addr;
1435 }
1436
1437 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1438 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1439
1440 /* Zero out the context. */
1441 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1442 CTX_WR(bp, 0x00, offset, 0);
1443 }
1444
1445 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1446 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1447 }
1448}
1449
/* Work around defective on-chip RX buffer memory.
 *
 * Drains the chip's internal mbuf pool by allocating every cluster,
 * records the ones whose handle does not carry the "bad block" marker
 * (bit 9), then frees only the good ones back -- deliberately leaking
 * the bad clusters so the hardware can never hand them out again.
 * Returns 0 on success, -ENOMEM if the tracking array can't be
 * allocated.  Sleeps (GFP_KERNEL); call from process context.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	/* 512 entries is enough to track the whole pool. */
	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Encode the free command the way firmware expects. */
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
1500
1501static void
1502bnx2_set_mac_addr(struct bnx2 *bp)
1503{
1504 u32 val;
1505 u8 *mac_addr = bp->dev->dev_addr;
1506
1507 val = (mac_addr[0] << 8) | mac_addr[1];
1508
1509 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1510
1511 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1512 (mac_addr[4] << 8) | mac_addr[5];
1513
1514 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1515}
1516
/* Allocate and DMA-map a fresh receive skb for RX ring slot @index.
 *
 * Aligns skb->data to an 8-byte boundary, maps it for device reads,
 * records the mapping in the software ring entry, publishes the bus
 * address to the hardware rx_bd, and advances rx_prod_bseq by the
 * buffer size.  Returns 0 on success or -ENOMEM when the skb cannot
 * be allocated (the slot is left unfilled).
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[index];
	unsigned long align;

	skb = dev_alloc_skb(bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Pad out so skb->data lands on an 8-byte boundary. */
	if (unlikely((align = (unsigned long) skb->data & 0x7))) {
		skb_reserve(skb, 8 - align);
	}

	skb->dev = bp->dev;
	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Hand the 64-bit bus address to the hardware descriptor. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
1549
1550static void
1551bnx2_phy_int(struct bnx2 *bp)
1552{
1553 u32 new_link_state, old_link_state;
1554
1555 new_link_state = bp->status_blk->status_attn_bits &
1556 STATUS_ATTN_BITS_LINK_STATE;
1557 old_link_state = bp->status_blk->status_attn_bits_ack &
1558 STATUS_ATTN_BITS_LINK_STATE;
1559 if (new_link_state != old_link_state) {
1560 if (new_link_state) {
1561 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1562 STATUS_ATTN_BITS_LINK_STATE);
1563 }
1564 else {
1565 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1566 STATUS_ATTN_BITS_LINK_STATE);
1567 }
1568 bnx2_set_link(bp);
1569 }
1570}
1571
/* Reclaim completed TX descriptors.
 *
 * Walks the TX ring from the driver's consumer index up to the
 * hardware consumer index in the status block, unmapping and freeing
 * each completed skb (head plus all fragment pages).  TSO packets may
 * complete partially, in which case processing stops until all BDs of
 * the packet are done.  Finally, if the queue was stopped for lack of
 * descriptors and enough have been freed, the queue is woken (under
 * tx_lock to serialize against the xmit path).  Runs in softirq
 * context (NAPI poll).
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	/* The hardware never reports the last-entry index; skip it. */
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;
#ifdef BCM_TSO
		/* partial BD completions possible with TSO packets */
		if (skb_shinfo(skb)->tso_size) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Signed 16-bit compare handles index wrap. */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}
#endif
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap each fragment page, one BD per fragment. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb_irq(skb);

		/* Refresh hw_cons: more completions may have arrived
		 * while we were freeing. */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;

	/* Wake the queue if the xmit path stopped it and we now have
	 * room for at least one maximally-fragmented packet. */
	if (unlikely(netif_queue_stopped(bp->dev))) {
		spin_lock(&bp->tx_lock);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)) {

			netif_wake_queue(bp->dev);
		}
		spin_unlock(&bp->tx_lock);
	}
}
1654
/* Recycle an RX buffer: move the skb (and its DMA mapping) from ring
 * slot @cons to producer slot @prod instead of allocating a new one.
 *
 * Used when the packet was copied out or dropped.  The mapping is
 * synced back to the device, rx_prod_bseq is advanced, and -- when
 * cons and prod differ -- the mapping and hardware descriptor address
 * are duplicated into the producer slot.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Give the (CPU-synced) header region back to the device. */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: skb pointer already restored, nothing to copy. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	/* Re-point the producer hardware descriptor at this buffer. */
	cons_bd = &bp->rx_desc_ring[cons];
	prod_bd = &bp->rx_desc_ring[prod];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
1684
/* NAPI receive processing: consume up to @budget packets from the RX
 * ring and hand them to the stack.
 *
 * For each completed buffer the l2_fhdr status is checked; errored
 * frames are recycled in place.  Small packets (when jumbo MTU is in
 * use) are copied into a fresh skb so the large buffer can be reused;
 * otherwise the buffer is unmapped and passed up whole, with a
 * replacement allocated for the ring.  Checksum offload results and
 * VLAN tags from the frame header are applied before
 * netif_receive_skb().  Finally the producer index and byte sequence
 * are written to the chip mailbox.  Returns the number of packets
 * processed.  Runs in softirq context.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	/* Hardware skips the last ring entry (next-page pointer). */
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the frame header region for inspection. */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		rx_hdr = (struct l2_fhdr *) skb->data;
		/* Strip the 4-byte CRC from the reported length. */
		len = rx_hdr->l2_fhdr_pkt_len - 4;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = dev_alloc_skb(len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			memcpy(new_skb->data,
				skb->data + bp->rx_offset - 2,
				len + 2);

			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);
			new_skb->dev = bp->dev;

			/* Recycle the original large buffer. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			/* Replacement allocated: pass this buffer up. */
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			/* Error frame or allocation failure: put the
			 * buffer back on the ring and drop the packet. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless they are VLAN-tagged
		 * (tagged frames are 4 bytes longer). */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(htons(skb->protocol) != 0x8100)) {

			dev_kfree_skb_irq(skb);
			goto next_rx;

		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Tell the chip about the new producer index and byte count. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
1835
1836/* MSI ISR - The only difference between this and the INTx ISR
1837 * is that the MSI interrupt is always serviced.
1838 */
/* MSI interrupt handler.
 *
 * Masks further interrupts via the PCICFG int-ack command register and
 * schedules NAPI polling.  Unlike the INTx handler there is no
 * "interrupt really ours?" check: MSI is never shared and the MSI
 * message always arrives after the status block DMA.  Returns
 * IRQ_HANDLED unconditionally; when intr_sem is raised (device being
 * reset) the interrupt is simply acknowledged without scheduling.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	prefetch(bp->status_blk);
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
1858
/* Legacy INTx interrupt handler (possibly on a shared line).
 *
 * First decides whether the interrupt belongs to this device: if the
 * status block index is unchanged AND the chip's INTA line is not
 * asserted, it is someone else's interrupt (IRQ_NONE).  Otherwise
 * interrupts are masked via the int-ack command register and NAPI
 * polling is scheduled.  Returns IRQ_HANDLED when intr_sem is raised
 * (device quiescing) without scheduling poll work.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((bp->status_blk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
1888
f4e418f7
MC
1889static inline int
1890bnx2_has_work(struct bnx2 *bp)
1891{
1892 struct status_block *sblk = bp->status_blk;
1893
1894 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
1895 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
1896 return 1;
1897
1898 if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
1899 bp->link_up)
1900 return 1;
1901
1902 return 0;
1903}
1904
b6016b76
MC
/* NAPI poll routine.
 *
 * Services link attentions (under phy_lock), TX completions, and up to
 * quota RX packets.  When no work remains, polling is completed and
 * interrupts are re-enabled through the int-ack command register.  For
 * MSI a single write suffices; for INTx the ack is written twice --
 * first with MASK_INT still set to update last_status_idx atomically,
 * then without it to actually unmask -- per the chip's INTx ack
 * requirements.  Returns 0 when done, 1 to be polled again.
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Link attention pending when attn bit and ack bit disagree. */
	if ((bp->status_blk->status_attn_bits &
	    STATUS_ATTN_BITS_LINK_STATE) !=
	    (bp->status_blk->status_attn_bits_ack &
	    STATUS_ATTN_BITS_LINK_STATE)) {

		spin_lock(&bp->phy_lock);
		bnx2_phy_int(bp);
		spin_unlock(&bp->phy_lock);
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		/* Never exceed this device's per-poll quota. */
		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		/* INTx: ack the index with interrupts still masked ... */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		/* ... then unmask with a second write. */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	return 1;
}
1959
1960/* Called with rtnl_lock from vlan functions and also dev->xmit_lock
1961 * from set_multicast.
1962 */
/* Reprogram the receive filters from dev->flags and the multicast
 * list: promiscuous / all-multi modes, the 256-bit multicast CRC hash,
 * and VLAN tag stripping (disabled while ASF management is active so
 * the firmware sees tagged frames).  Called with rtnl_lock from vlan
 * functions and also dev->xmit_lock from set_multicast; takes phy_lock
 * to serialize register access.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode minus the bits computed below. */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: saturate the hash filter. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Low 8 CRC bits select one of 256 hash bits:
			 * top 3 pick the register, low 5 the bit. */
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable, program, then re-enable the sort user rule. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
2033
/* Load an RV2P (RX virtual-to-physical) processor firmware image.
 *
 * @rv2p_code:     array of 64-bit instructions stored as high/low
 *                 32-bit word pairs.
 * @rv2p_code_len: image length in bytes (8 bytes per instruction).
 * @rv2p_proc:     RV2P_PROC1 or RV2P_PROC2, selecting the target unit.
 *
 * Each instruction is staged through the INSTR_HIGH/INSTR_LOW
 * registers and committed with a write to the per-processor ADDR_CMD
 * register.  The processor is left in reset; it is un-stalled later in
 * the bring-up sequence.
 */
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;


	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, *rv2p_code);
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, *rv2p_code);
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
2066
2067static void
2068load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2069{
2070 u32 offset;
2071 u32 val;
2072
2073 /* Halt the CPU. */
2074 val = REG_RD_IND(bp, cpu_reg->mode);
2075 val |= cpu_reg->mode_value_halt;
2076 REG_WR_IND(bp, cpu_reg->mode, val);
2077 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2078
2079 /* Load the Text area. */
2080 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2081 if (fw->text) {
2082 int j;
2083
2084 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2085 REG_WR_IND(bp, offset, fw->text[j]);
2086 }
2087 }
2088
2089 /* Load the Data area. */
2090 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2091 if (fw->data) {
2092 int j;
2093
2094 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2095 REG_WR_IND(bp, offset, fw->data[j]);
2096 }
2097 }
2098
2099 /* Load the SBSS area. */
2100 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2101 if (fw->sbss) {
2102 int j;
2103
2104 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2105 REG_WR_IND(bp, offset, fw->sbss[j]);
2106 }
2107 }
2108
2109 /* Load the BSS area. */
2110 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2111 if (fw->bss) {
2112 int j;
2113
2114 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2115 REG_WR_IND(bp, offset, fw->bss[j]);
2116 }
2117 }
2118
2119 /* Load the Read-Only area. */
2120 offset = cpu_reg->spad_base +
2121 (fw->rodata_addr - cpu_reg->mips_view_base);
2122 if (fw->rodata) {
2123 int j;
2124
2125 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2126 REG_WR_IND(bp, offset, fw->rodata[j]);
2127 }
2128 }
2129
2130 /* Clear the pre-fetch instruction. */
2131 REG_WR_IND(bp, cpu_reg->inst, 0);
2132 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2133
2134 /* Start the CPU. */
2135 val = REG_RD_IND(bp, cpu_reg->mode);
2136 val &= ~cpu_reg->mode_value_halt;
2137 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2138 REG_WR_IND(bp, cpu_reg->mode, val);
2139}
2140
2141static void
2142bnx2_init_cpus(struct bnx2 *bp)
2143{
2144 struct cpu_reg cpu_reg;
2145 struct fw_info fw;
2146
2147 /* Initialize the RV2P processor. */
2148 load_rv2p_fw(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), RV2P_PROC1);
2149 load_rv2p_fw(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), RV2P_PROC2);
2150
2151 /* Initialize the RX Processor. */
2152 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2153 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2154 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2155 cpu_reg.state = BNX2_RXP_CPU_STATE;
2156 cpu_reg.state_value_clear = 0xffffff;
2157 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2158 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2159 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2160 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2161 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2162 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2163 cpu_reg.mips_view_base = 0x8000000;
2164
2165 fw.ver_major = bnx2_RXP_b06FwReleaseMajor;
2166 fw.ver_minor = bnx2_RXP_b06FwReleaseMinor;
2167 fw.ver_fix = bnx2_RXP_b06FwReleaseFix;
2168 fw.start_addr = bnx2_RXP_b06FwStartAddr;
2169
2170 fw.text_addr = bnx2_RXP_b06FwTextAddr;
2171 fw.text_len = bnx2_RXP_b06FwTextLen;
2172 fw.text_index = 0;
2173 fw.text = bnx2_RXP_b06FwText;
2174
2175 fw.data_addr = bnx2_RXP_b06FwDataAddr;
2176 fw.data_len = bnx2_RXP_b06FwDataLen;
2177 fw.data_index = 0;
2178 fw.data = bnx2_RXP_b06FwData;
2179
2180 fw.sbss_addr = bnx2_RXP_b06FwSbssAddr;
2181 fw.sbss_len = bnx2_RXP_b06FwSbssLen;
2182 fw.sbss_index = 0;
2183 fw.sbss = bnx2_RXP_b06FwSbss;
2184
2185 fw.bss_addr = bnx2_RXP_b06FwBssAddr;
2186 fw.bss_len = bnx2_RXP_b06FwBssLen;
2187 fw.bss_index = 0;
2188 fw.bss = bnx2_RXP_b06FwBss;
2189
2190 fw.rodata_addr = bnx2_RXP_b06FwRodataAddr;
2191 fw.rodata_len = bnx2_RXP_b06FwRodataLen;
2192 fw.rodata_index = 0;
2193 fw.rodata = bnx2_RXP_b06FwRodata;
2194
2195 load_cpu_fw(bp, &cpu_reg, &fw);
2196
2197 /* Initialize the TX Processor. */
2198 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2199 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2200 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2201 cpu_reg.state = BNX2_TXP_CPU_STATE;
2202 cpu_reg.state_value_clear = 0xffffff;
2203 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2204 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2205 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2206 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2207 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2208 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2209 cpu_reg.mips_view_base = 0x8000000;
2210
2211 fw.ver_major = bnx2_TXP_b06FwReleaseMajor;
2212 fw.ver_minor = bnx2_TXP_b06FwReleaseMinor;
2213 fw.ver_fix = bnx2_TXP_b06FwReleaseFix;
2214 fw.start_addr = bnx2_TXP_b06FwStartAddr;
2215
2216 fw.text_addr = bnx2_TXP_b06FwTextAddr;
2217 fw.text_len = bnx2_TXP_b06FwTextLen;
2218 fw.text_index = 0;
2219 fw.text = bnx2_TXP_b06FwText;
2220
2221 fw.data_addr = bnx2_TXP_b06FwDataAddr;
2222 fw.data_len = bnx2_TXP_b06FwDataLen;
2223 fw.data_index = 0;
2224 fw.data = bnx2_TXP_b06FwData;
2225
2226 fw.sbss_addr = bnx2_TXP_b06FwSbssAddr;
2227 fw.sbss_len = bnx2_TXP_b06FwSbssLen;
2228 fw.sbss_index = 0;
2229 fw.sbss = bnx2_TXP_b06FwSbss;
2230
2231 fw.bss_addr = bnx2_TXP_b06FwBssAddr;
2232 fw.bss_len = bnx2_TXP_b06FwBssLen;
2233 fw.bss_index = 0;
2234 fw.bss = bnx2_TXP_b06FwBss;
2235
2236 fw.rodata_addr = bnx2_TXP_b06FwRodataAddr;
2237 fw.rodata_len = bnx2_TXP_b06FwRodataLen;
2238 fw.rodata_index = 0;
2239 fw.rodata = bnx2_TXP_b06FwRodata;
2240
2241 load_cpu_fw(bp, &cpu_reg, &fw);
2242
2243 /* Initialize the TX Patch-up Processor. */
2244 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2245 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2246 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2247 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2248 cpu_reg.state_value_clear = 0xffffff;
2249 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2250 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2251 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2252 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2253 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2254 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2255 cpu_reg.mips_view_base = 0x8000000;
2256
2257 fw.ver_major = bnx2_TPAT_b06FwReleaseMajor;
2258 fw.ver_minor = bnx2_TPAT_b06FwReleaseMinor;
2259 fw.ver_fix = bnx2_TPAT_b06FwReleaseFix;
2260 fw.start_addr = bnx2_TPAT_b06FwStartAddr;
2261
2262 fw.text_addr = bnx2_TPAT_b06FwTextAddr;
2263 fw.text_len = bnx2_TPAT_b06FwTextLen;
2264 fw.text_index = 0;
2265 fw.text = bnx2_TPAT_b06FwText;
2266
2267 fw.data_addr = bnx2_TPAT_b06FwDataAddr;
2268 fw.data_len = bnx2_TPAT_b06FwDataLen;
2269 fw.data_index = 0;
2270 fw.data = bnx2_TPAT_b06FwData;
2271
2272 fw.sbss_addr = bnx2_TPAT_b06FwSbssAddr;
2273 fw.sbss_len = bnx2_TPAT_b06FwSbssLen;
2274 fw.sbss_index = 0;
2275 fw.sbss = bnx2_TPAT_b06FwSbss;
2276
2277 fw.bss_addr = bnx2_TPAT_b06FwBssAddr;
2278 fw.bss_len = bnx2_TPAT_b06FwBssLen;
2279 fw.bss_index = 0;
2280 fw.bss = bnx2_TPAT_b06FwBss;
2281
2282 fw.rodata_addr = bnx2_TPAT_b06FwRodataAddr;
2283 fw.rodata_len = bnx2_TPAT_b06FwRodataLen;
2284 fw.rodata_index = 0;
2285 fw.rodata = bnx2_TPAT_b06FwRodata;
2286
2287 load_cpu_fw(bp, &cpu_reg, &fw);
2288
2289 /* Initialize the Completion Processor. */
2290 cpu_reg.mode = BNX2_COM_CPU_MODE;
2291 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2292 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2293 cpu_reg.state = BNX2_COM_CPU_STATE;
2294 cpu_reg.state_value_clear = 0xffffff;
2295 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2296 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2297 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2298 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2299 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2300 cpu_reg.spad_base = BNX2_COM_SCRATCH;
2301 cpu_reg.mips_view_base = 0x8000000;
2302
2303 fw.ver_major = bnx2_COM_b06FwReleaseMajor;
2304 fw.ver_minor = bnx2_COM_b06FwReleaseMinor;
2305 fw.ver_fix = bnx2_COM_b06FwReleaseFix;
2306 fw.start_addr = bnx2_COM_b06FwStartAddr;
2307
2308 fw.text_addr = bnx2_COM_b06FwTextAddr;
2309 fw.text_len = bnx2_COM_b06FwTextLen;
2310 fw.text_index = 0;
2311 fw.text = bnx2_COM_b06FwText;
2312
2313 fw.data_addr = bnx2_COM_b06FwDataAddr;
2314 fw.data_len = bnx2_COM_b06FwDataLen;
2315 fw.data_index = 0;
2316 fw.data = bnx2_COM_b06FwData;
2317
2318 fw.sbss_addr = bnx2_COM_b06FwSbssAddr;
2319 fw.sbss_len = bnx2_COM_b06FwSbssLen;
2320 fw.sbss_index = 0;
2321 fw.sbss = bnx2_COM_b06FwSbss;
2322
2323 fw.bss_addr = bnx2_COM_b06FwBssAddr;
2324 fw.bss_len = bnx2_COM_b06FwBssLen;
2325 fw.bss_index = 0;
2326 fw.bss = bnx2_COM_b06FwBss;
2327
2328 fw.rodata_addr = bnx2_COM_b06FwRodataAddr;
2329 fw.rodata_len = bnx2_COM_b06FwRodataLen;
2330 fw.rodata_index = 0;
2331 fw.rodata = bnx2_COM_b06FwRodata;
2332
2333 load_cpu_fw(bp, &cpu_reg, &fw);
2334
2335}
2336
/* Transition the device between PCI power states.
 *
 * @bp:    driver private state
 * @state: target state; only PCI_D0 and PCI_D3hot are supported
 *
 * D0: clears PME status, waits out the D3hot exit delay if needed, and
 * disables magic-packet/ACPI wake-up matching in the EMAC and RPM blocks.
 * D3hot: if WOL is enabled, forces 10/100 autoneg on the copper PHY,
 * programs the MAC for magic-packet reception (all-multicast accept),
 * enables the EMAC/RPM blocks needed for wake-up, then tells the firmware
 * we are suspending and writes the PM control register to enter D3hot.
 *
 * Returns 0 on success, -EINVAL for any other requested state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Enter D0 and clear any pending PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Clear any latched wake-up events and disable magic
		 * packet mode now that we are running normally. */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg for the link
			 * while suspended; restore the user settings in
			 * bp afterwards. */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Program sort user 0: write 0 first, then the
			 * value, then the value with the enable bit --
			 * presumably a required hardware sequencing; verify
			 * against chip documentation. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			/* On 5706 A0/A1 only enter D3hot (state 3) when
			 * WOL is enabled; otherwise stay in D0. */
			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
2463
2464static int
2465bnx2_acquire_nvram_lock(struct bnx2 *bp)
2466{
2467 u32 val;
2468 int j;
2469
2470 /* Request access to the flash interface. */
2471 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2472 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2473 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2474 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2475 break;
2476
2477 udelay(5);
2478 }
2479
2480 if (j >= NVRAM_TIMEOUT_COUNT)
2481 return -EBUSY;
2482
2483 return 0;
2484}
2485
2486static int
2487bnx2_release_nvram_lock(struct bnx2 *bp)
2488{
2489 int j;
2490 u32 val;
2491
2492 /* Relinquish nvram interface. */
2493 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2494
2495 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2496 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2497 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2498 break;
2499
2500 udelay(5);
2501 }
2502
2503 if (j >= NVRAM_TIMEOUT_COUNT)
2504 return -EBUSY;
2505
2506 return 0;
2507}
2508
2509
/* Unlock NVRAM write protection at the controller level.  For
 * non-buffered flash parts this additionally issues a WREN command to
 * the part itself and waits for completion.
 * Returns 0 on success, -EBUSY if the WREN command never completes.
 */
static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

	if (!bp->flash_info->buffered) {
		int j;

		/* Clear DONE, then issue a write-enable command. */
		REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
		REG_WR(bp, BNX2_NVM_COMMAND,
		       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

		/* Poll for command completion. */
		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			udelay(5);

			val = REG_RD(bp, BNX2_NVM_COMMAND);
			if (val & BNX2_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT)
			return -EBUSY;
	}
	return 0;
}
2538
2539static void
2540bnx2_disable_nvram_write(struct bnx2 *bp)
2541{
2542 u32 val;
2543
2544 val = REG_RD(bp, BNX2_MISC_CFG);
2545 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2546}
2547
2548
2549static void
2550bnx2_enable_nvram_access(struct bnx2 *bp)
2551{
2552 u32 val;
2553
2554 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2555 /* Enable both bits, even on read. */
2556 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2557 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2558}
2559
2560static void
2561bnx2_disable_nvram_access(struct bnx2 *bp)
2562{
2563 u32 val;
2564
2565 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2566 /* Disable both bits, even after read. */
2567 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2568 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2569 BNX2_NVM_ACCESS_ENABLE_WR_EN));
2570}
2571
/* Erase one flash page at @offset.  Buffered flash parts erase
 * implicitly on write, so this is a no-op for them.
 * Returns 0 on success, -EBUSY if the erase command times out.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->buffered)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2611
/* Read one 32-bit word from NVRAM at @offset into @ret_val (4 bytes,
 * stored in big-endian byte order after conversion).  @cmd_flags carries
 * FIRST/LAST framing bits for multi-word transactions.
 * Returns 0 on success, -EBUSY on command timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash. */
	if (bp->flash_info->buffered) {
		/* Buffered parts address by (page number << page_bits) +
		 * offset within the page. */
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			val = REG_RD(bp, BNX2_NVM_READ);

			/* Preserve on-flash byte order for the caller. */
			val = be32_to_cpu(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2657
2658
/* Write one 32-bit word (@val, 4 bytes) to NVRAM at @offset.
 * @cmd_flags carries FIRST/LAST framing bits for multi-word
 * transactions.  Returns 0 on success, -EBUSY on command timeout.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash. */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Convert to on-flash byte order before writing. */
	memcpy(&val32, val, 4);
	val32 = cpu_to_be32(val32);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, val32);

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2702
/* Identify the attached flash/EEPROM part and record it in
 * bp->flash_info, reconfiguring the NVRAM interface registers on first
 * use.  Also determines the flash size, preferring the value reported
 * in shared hardware config over the table default.
 * Returns 0 on success, -ENODEV if no table entry matches, or an error
 * from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc;
	struct flash_spec *flash;

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Match on the strap bits stored in config1. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field identifies the part --
		 * presumably the backup-strap indicator; verify against
		 * chip documentation. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

	/* Prefer the NVRAM size advertised in shared hardware config;
	 * fall back to the flash table's total size if it is zero. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
2780
2781static int
2782bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2783 int buf_size)
2784{
2785 int rc = 0;
2786 u32 cmd_flags, offset32, len32, extra;
2787
2788 if (buf_size == 0)
2789 return 0;
2790
2791 /* Request access to the flash interface. */
2792 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2793 return rc;
2794
2795 /* Enable access to flash interface */
2796 bnx2_enable_nvram_access(bp);
2797
2798 len32 = buf_size;
2799 offset32 = offset;
2800 extra = 0;
2801
2802 cmd_flags = 0;
2803
2804 if (offset32 & 3) {
2805 u8 buf[4];
2806 u32 pre_len;
2807
2808 offset32 &= ~3;
2809 pre_len = 4 - (offset & 3);
2810
2811 if (pre_len >= len32) {
2812 pre_len = len32;
2813 cmd_flags = BNX2_NVM_COMMAND_FIRST |
2814 BNX2_NVM_COMMAND_LAST;
2815 }
2816 else {
2817 cmd_flags = BNX2_NVM_COMMAND_FIRST;
2818 }
2819
2820 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2821
2822 if (rc)
2823 return rc;
2824
2825 memcpy(ret_buf, buf + (offset & 3), pre_len);
2826
2827 offset32 += 4;
2828 ret_buf += pre_len;
2829 len32 -= pre_len;
2830 }
2831 if (len32 & 3) {
2832 extra = 4 - (len32 & 3);
2833 len32 = (len32 + 4) & ~3;
2834 }
2835
2836 if (len32 == 4) {
2837 u8 buf[4];
2838
2839 if (cmd_flags)
2840 cmd_flags = BNX2_NVM_COMMAND_LAST;
2841 else
2842 cmd_flags = BNX2_NVM_COMMAND_FIRST |
2843 BNX2_NVM_COMMAND_LAST;
2844
2845 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2846
2847 memcpy(ret_buf, buf, 4 - extra);
2848 }
2849 else if (len32 > 0) {
2850 u8 buf[4];
2851
2852 /* Read the first word. */
2853 if (cmd_flags)
2854 cmd_flags = 0;
2855 else
2856 cmd_flags = BNX2_NVM_COMMAND_FIRST;
2857
2858 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
2859
2860 /* Advance to the next dword. */
2861 offset32 += 4;
2862 ret_buf += 4;
2863 len32 -= 4;
2864
2865 while (len32 > 4 && rc == 0) {
2866 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
2867
2868 /* Advance to the next dword. */
2869 offset32 += 4;
2870 ret_buf += 4;
2871 len32 -= 4;
2872 }
2873
2874 if (rc)
2875 return rc;
2876
2877 cmd_flags = BNX2_NVM_COMMAND_LAST;
2878 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2879
2880 memcpy(ret_buf, buf, 4 - extra);
2881 }
2882
2883 /* Disable access to flash interface */
2884 bnx2_disable_nvram_access(bp);
2885
2886 bnx2_release_nvram_lock(bp);
2887
2888 return rc;
2889}
2890
2891static int
2892bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
2893 int buf_size)
2894{
2895 u32 written, offset32, len32;
2896 u8 *buf, start[4], end[4];
2897 int rc = 0;
2898 int align_start, align_end;
2899
2900 buf = data_buf;
2901 offset32 = offset;
2902 len32 = buf_size;
2903 align_start = align_end = 0;
2904
2905 if ((align_start = (offset32 & 3))) {
2906 offset32 &= ~3;
2907 len32 += align_start;
2908 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
2909 return rc;
2910 }
2911
2912 if (len32 & 3) {
2913 if ((len32 > 4) || !align_start) {
2914 align_end = 4 - (len32 & 3);
2915 len32 += align_end;
2916 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
2917 end, 4))) {
2918 return rc;
2919 }
2920 }
2921 }
2922
2923 if (align_start || align_end) {
2924 buf = kmalloc(len32, GFP_KERNEL);
2925 if (buf == 0)
2926 return -ENOMEM;
2927 if (align_start) {
2928 memcpy(buf, start, 4);
2929 }
2930 if (align_end) {
2931 memcpy(buf + len32 - 4, end, 4);
2932 }
2933 memcpy(buf + align_start, data_buf, buf_size);
2934 }
2935
2936 written = 0;
2937 while ((written < len32) && (rc == 0)) {
2938 u32 page_start, page_end, data_start, data_end;
2939 u32 addr, cmd_flags;
2940 int i;
2941 u8 flash_buffer[264];
2942
2943 /* Find the page_start addr */
2944 page_start = offset32 + written;
2945 page_start -= (page_start % bp->flash_info->page_size);
2946 /* Find the page_end addr */
2947 page_end = page_start + bp->flash_info->page_size;
2948 /* Find the data_start addr */
2949 data_start = (written == 0) ? offset32 : page_start;
2950 /* Find the data_end addr */
2951 data_end = (page_end > offset32 + len32) ?
2952 (offset32 + len32) : page_end;
2953
2954 /* Request access to the flash interface. */
2955 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2956 goto nvram_write_end;
2957
2958 /* Enable access to flash interface */
2959 bnx2_enable_nvram_access(bp);
2960
2961 cmd_flags = BNX2_NVM_COMMAND_FIRST;
2962 if (bp->flash_info->buffered == 0) {
2963 int j;
2964
2965 /* Read the whole page into the buffer
2966 * (non-buffer flash only) */
2967 for (j = 0; j < bp->flash_info->page_size; j += 4) {
2968 if (j == (bp->flash_info->page_size - 4)) {
2969 cmd_flags |= BNX2_NVM_COMMAND_LAST;
2970 }
2971 rc = bnx2_nvram_read_dword(bp,
2972 page_start + j,
2973 &flash_buffer[j],
2974 cmd_flags);
2975
2976 if (rc)
2977 goto nvram_write_end;
2978
2979 cmd_flags = 0;
2980 }
2981 }
2982
2983 /* Enable writes to flash interface (unlock write-protect) */
2984 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
2985 goto nvram_write_end;
2986
2987 /* Erase the page */
2988 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
2989 goto nvram_write_end;
2990
2991 /* Re-enable the write again for the actual write */
2992 bnx2_enable_nvram_write(bp);
2993
2994 /* Loop to write back the buffer data from page_start to
2995 * data_start */
2996 i = 0;
2997 if (bp->flash_info->buffered == 0) {
2998 for (addr = page_start; addr < data_start;
2999 addr += 4, i += 4) {
3000
3001 rc = bnx2_nvram_write_dword(bp, addr,
3002 &flash_buffer[i], cmd_flags);
3003
3004 if (rc != 0)
3005 goto nvram_write_end;
3006
3007 cmd_flags = 0;
3008 }
3009 }
3010
3011 /* Loop to write the new data from data_start to data_end */
3012 for (addr = data_start; addr < data_end; addr += 4, i++) {
3013 if ((addr == page_end - 4) ||
3014 ((bp->flash_info->buffered) &&
3015 (addr == data_end - 4))) {
3016
3017 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3018 }
3019 rc = bnx2_nvram_write_dword(bp, addr, buf,
3020 cmd_flags);
3021
3022 if (rc != 0)
3023 goto nvram_write_end;
3024
3025 cmd_flags = 0;
3026 buf += 4;
3027 }
3028
3029 /* Loop to write back the buffer data from data_end
3030 * to page_end */
3031 if (bp->flash_info->buffered == 0) {
3032 for (addr = data_end; addr < page_end;
3033 addr += 4, i += 4) {
3034
3035 if (addr == page_end-4) {
3036 cmd_flags = BNX2_NVM_COMMAND_LAST;
3037 }
3038 rc = bnx2_nvram_write_dword(bp, addr,
3039 &flash_buffer[i], cmd_flags);
3040
3041 if (rc != 0)
3042 goto nvram_write_end;
3043
3044 cmd_flags = 0;
3045 }
3046 }
3047
3048 /* Disable writes to flash interface (lock write-protect) */
3049 bnx2_disable_nvram_write(bp);
3050
3051 /* Disable access to flash interface */
3052 bnx2_disable_nvram_access(bp);
3053 bnx2_release_nvram_lock(bp);
3054
3055 /* Increment written */
3056 written += data_end - data_start;
3057 }
3058
3059nvram_write_end:
3060 if (align_start || align_end)
3061 kfree(buf);
3062 return rc;
3063}
3064
/* Perform a coordinated soft reset of the chip core.
 *
 * @reset_code: driver message code passed to the firmware before and
 *              after the reset (e.g. BNX2_DRV_MSG_CODE_RESET)
 *
 * Quiesces DMA, synchronizes with the bootcode firmware, issues the
 * core reset, verifies completion and endian configuration, then waits
 * for firmware re-initialization.  Includes the 5706 A0 rbuf and
 * voltage-regulator workarounds.
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
		   BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
	      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

	/* Chip reset. */
	REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

	/* Extra settle time for early 5706 steppings -- presumably an
	 * erratum workaround; verify against chip errata. */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1))
		msleep(15);

	/* Reset takes approximate 30 usec */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
		if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
			break;
		}
		udelay(10);
	}

	if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
		printk(KERN_ERR PFX "Chip reset did not complete\n");
		return -EBUSY;
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower. The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	return rc;
}
3143
/* Program the chip after a reset: DMA configuration, contexts, internal
 * CPUs, NVRAM, MAC address, MTU, host-coalescing parameters, and the
 * receive filter; finally tells the firmware the driver is up and
 * enables the functional blocks.
 * Returns 0 on success or the firmware-sync error code.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA byte/word swapping and channel counts. */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	/* Bit 23 set for 133 MHz PCI-X -- presumably a timing tweak;
	 * verify against chip documentation. */
	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		/* Disable PCI-X relaxed ordering. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts. The
	 * context block must have already been enabled. */
	bnx2_init_context(bp);

	bnx2_init_cpus(bp);
	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU. Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host coalescing thresholds and timers (int values in the
	 * upper half-word of each register). */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);	/* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
	else {
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
		       BNX2_HC_CONFIG_TX_TMR_MODE |
		       BNX2_HC_CONFIG_COLLECT_STATS);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);

	/* Record whether the firmware has ASF management enabled. */
	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
	    BNX2_PORT_FEATURE_ASF_ENABLED)
		bp->flags |= ASF_ENABLE_FLAG;

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	/* Enable the remaining functional blocks. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	return rc;
}
3301
3302
/* Initialize the TX ring: link the last BD back to the ring base,
 * reset the software producer/consumer indices, and program the TX
 * context in the chip with the ring type and DMA base address.
 */
static void
bnx2_init_tx_ring(struct bnx2 *bp)
{
	struct tx_bd *txbd;
	u32 val;

	/* The final BD in the page chains back to the start of the ring. */
	txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];

	txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;

	bp->tx_prod = 0;
	bp->tx_cons = 0;
	bp->hw_tx_cons = 0;
	bp->tx_prod_bseq = 0;

	val = BNX2_L2CTX_TYPE_TYPE_L2;
	val |= BNX2_L2CTX_TYPE_SIZE_L2;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TYPE, val);

	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2;
	val |= 8 << 16;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_CMD_TYPE, val);

	/* TX BD chain base address (hi/lo halves). */
	val = (u64) bp->tx_desc_mapping >> 32;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_HI, val);

	val = (u64) bp->tx_desc_mapping & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_LO, val);
}
3333
/* Initialize the RX ring: compute buffer sizes from the MTU, set up
 * the BD ring (last BD chains back to the base), program the RX context
 * in the chip, pre-fill the ring with receive skbs, and publish the
 * initial producer index/byte sequence to the hardware.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* 8 for alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + 8;

	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->hw_rx_cons = 0;
	bp->rx_prod_bseq = 0;

	rxbd = &bp->rx_desc_ring[0];
	for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
		rxbd->rx_bd_len = bp->rx_buf_use_size;
		rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
	}

	/* Last BD (after the loop, rxbd points at it) chains back to
	 * the start of the ring. */
	rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping & 0xffffffff;

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	val = (u64) bp->rx_desc_mapping >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Fill the ring with buffers; stop early on allocation failure. */
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	/* Publish producer index and byte sequence to the chip. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
3385
/* Unmap and free every skb still held in the TX ring.  Each skb's head
 * occupies one BD and each page fragment one following BD, so the loop
 * index advances by nr_frags + 1 per skb.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		if (skb == NULL) {
			i++;
			continue;
		}

		/* Unmap the linear (head) portion. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		/* Unmap each page fragment from its own BD slot. */
		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		dev_kfree_skb_any(skb);
		i += j + 1;
	}

}
3422
3423static void
3424bnx2_free_rx_skbs(struct bnx2 *bp)
3425{
3426 int i;
3427
3428 if (bp->rx_buf_ring == NULL)
3429 return;
3430
3431 for (i = 0; i < RX_DESC_CNT; i++) {
3432 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3433 struct sk_buff *skb = rx_buf->skb;
3434
05d0f1cf 3435 if (skb == NULL)
b6016b76
MC
3436 continue;
3437
3438 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3439 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3440
3441 rx_buf->skb = NULL;
3442
3443 dev_kfree_skb_any(skb);
3444 }
3445}
3446
/* Release all driver-owned skbs on both rings. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
3453
3454static int
3455bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3456{
3457 int rc;
3458
3459 rc = bnx2_reset_chip(bp, reset_code);
3460 bnx2_free_skbs(bp);
3461 if (rc)
3462 return rc;
3463
3464 bnx2_init_chip(bp);
3465 bnx2_init_tx_ring(bp);
3466 bnx2_init_rx_ring(bp);
3467 return 0;
3468}
3469
3470static int
3471bnx2_init_nic(struct bnx2 *bp)
3472{
3473 int rc;
3474
3475 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3476 return rc;
3477
3478 bnx2_init_phy(bp);
3479 bnx2_set_link(bp);
3480 return 0;
3481}
3482
3483static int
3484bnx2_test_registers(struct bnx2 *bp)
3485{
3486 int ret;
3487 int i;
f71e1309 3488 static const struct {
b6016b76
MC
3489 u16 offset;
3490 u16 flags;
3491 u32 rw_mask;
3492 u32 ro_mask;
3493 } reg_tbl[] = {
3494 { 0x006c, 0, 0x00000000, 0x0000003f },
3495 { 0x0090, 0, 0xffffffff, 0x00000000 },
3496 { 0x0094, 0, 0x00000000, 0x00000000 },
3497
3498 { 0x0404, 0, 0x00003f00, 0x00000000 },
3499 { 0x0418, 0, 0x00000000, 0xffffffff },
3500 { 0x041c, 0, 0x00000000, 0xffffffff },
3501 { 0x0420, 0, 0x00000000, 0x80ffffff },
3502 { 0x0424, 0, 0x00000000, 0x00000000 },
3503 { 0x0428, 0, 0x00000000, 0x00000001 },
3504 { 0x0450, 0, 0x00000000, 0x0000ffff },
3505 { 0x0454, 0, 0x00000000, 0xffffffff },
3506 { 0x0458, 0, 0x00000000, 0xffffffff },
3507
3508 { 0x0808, 0, 0x00000000, 0xffffffff },
3509 { 0x0854, 0, 0x00000000, 0xffffffff },
3510 { 0x0868, 0, 0x00000000, 0x77777777 },
3511 { 0x086c, 0, 0x00000000, 0x77777777 },
3512 { 0x0870, 0, 0x00000000, 0x77777777 },
3513 { 0x0874, 0, 0x00000000, 0x77777777 },
3514
3515 { 0x0c00, 0, 0x00000000, 0x00000001 },
3516 { 0x0c04, 0, 0x00000000, 0x03ff0001 },
3517 { 0x0c08, 0, 0x0f0ff073, 0x00000000 },
b6016b76
MC
3518
3519 { 0x1000, 0, 0x00000000, 0x00000001 },
3520 { 0x1004, 0, 0x00000000, 0x000f0001 },
b6016b76
MC
3521
3522 { 0x1408, 0, 0x01c00800, 0x00000000 },
3523 { 0x149c, 0, 0x8000ffff, 0x00000000 },
3524 { 0x14a8, 0, 0x00000000, 0x000001ff },
5b0c76ad 3525 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
b6016b76
MC
3526 { 0x14b0, 0, 0x00000002, 0x00000001 },
3527 { 0x14b8, 0, 0x00000000, 0x00000000 },
3528 { 0x14c0, 0, 0x00000000, 0x00000009 },
3529 { 0x14c4, 0, 0x00003fff, 0x00000000 },
3530 { 0x14cc, 0, 0x00000000, 0x00000001 },
3531 { 0x14d0, 0, 0xffffffff, 0x00000000 },
b6016b76
MC
3532
3533 { 0x1800, 0, 0x00000000, 0x00000001 },
3534 { 0x1804, 0, 0x00000000, 0x00000003 },
b6016b76
MC
3535
3536 { 0x2800, 0, 0x00000000, 0x00000001 },
3537 { 0x2804, 0, 0x00000000, 0x00003f01 },
3538 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
3539 { 0x2810, 0, 0xffff0000, 0x00000000 },
3540 { 0x2814, 0, 0xffff0000, 0x00000000 },
3541 { 0x2818, 0, 0xffff0000, 0x00000000 },
3542 { 0x281c, 0, 0xffff0000, 0x00000000 },
3543 { 0x2834, 0, 0xffffffff, 0x00000000 },
3544 { 0x2840, 0, 0x00000000, 0xffffffff },
3545 { 0x2844, 0, 0x00000000, 0xffffffff },
3546 { 0x2848, 0, 0xffffffff, 0x00000000 },
3547 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
3548
3549 { 0x2c00, 0, 0x00000000, 0x00000011 },
3550 { 0x2c04, 0, 0x00000000, 0x00030007 },
3551
b6016b76
MC
3552 { 0x3c00, 0, 0x00000000, 0x00000001 },
3553 { 0x3c04, 0, 0x00000000, 0x00070000 },
3554 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
3555 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
3556 { 0x3c10, 0, 0xffffffff, 0x00000000 },
3557 { 0x3c14, 0, 0x00000000, 0xffffffff },
3558 { 0x3c18, 0, 0x00000000, 0xffffffff },
3559 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
3560 { 0x3c20, 0, 0xffffff00, 0x00000000 },
b6016b76
MC
3561
3562 { 0x5004, 0, 0x00000000, 0x0000007f },
3563 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
3564 { 0x500c, 0, 0xf800f800, 0x07ff07ff },
3565
b6016b76
MC
3566 { 0x5c00, 0, 0x00000000, 0x00000001 },
3567 { 0x5c04, 0, 0x00000000, 0x0003000f },
3568 { 0x5c08, 0, 0x00000003, 0x00000000 },
3569 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
3570 { 0x5c10, 0, 0x00000000, 0xffffffff },
3571 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
3572 { 0x5c84, 0, 0x00000000, 0x0000f333 },
3573 { 0x5c88, 0, 0x00000000, 0x00077373 },
3574 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
3575
3576 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
3577 { 0x680c, 0, 0xffffffff, 0x00000000 },
3578 { 0x6810, 0, 0xffffffff, 0x00000000 },
3579 { 0x6814, 0, 0xffffffff, 0x00000000 },
3580 { 0x6818, 0, 0xffffffff, 0x00000000 },
3581 { 0x681c, 0, 0xffffffff, 0x00000000 },
3582 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
3583 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
3584 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
3585 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
3586 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
3587 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
3588 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
3589 { 0x683c, 0, 0x0000ffff, 0x00000000 },
3590 { 0x6840, 0, 0x00000ff0, 0x00000000 },
3591 { 0x6844, 0, 0x00ffff00, 0x00000000 },
3592 { 0x684c, 0, 0xffffffff, 0x00000000 },
3593 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
3594 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
3595 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
3596 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
3597 { 0x6908, 0, 0x00000000, 0x0001ff0f },
3598 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
3599
3600 { 0xffff, 0, 0x00000000, 0x00000000 },
3601 };
3602
3603 ret = 0;
3604 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
3605 u32 offset, rw_mask, ro_mask, save_val, val;
3606
3607 offset = (u32) reg_tbl[i].offset;
3608 rw_mask = reg_tbl[i].rw_mask;
3609 ro_mask = reg_tbl[i].ro_mask;
3610
14ab9b86 3611 save_val = readl(bp->regview + offset);
b6016b76 3612
14ab9b86 3613 writel(0, bp->regview + offset);
b6016b76 3614
14ab9b86 3615 val = readl(bp->regview + offset);
b6016b76
MC
3616 if ((val & rw_mask) != 0) {
3617 goto reg_test_err;
3618 }
3619
3620 if ((val & ro_mask) != (save_val & ro_mask)) {
3621 goto reg_test_err;
3622 }
3623
14ab9b86 3624 writel(0xffffffff, bp->regview + offset);
b6016b76 3625
14ab9b86 3626 val = readl(bp->regview + offset);
b6016b76
MC
3627 if ((val & rw_mask) != rw_mask) {
3628 goto reg_test_err;
3629 }
3630
3631 if ((val & ro_mask) != (save_val & ro_mask)) {
3632 goto reg_test_err;
3633 }
3634
14ab9b86 3635 writel(save_val, bp->regview + offset);
b6016b76
MC
3636 continue;
3637
3638reg_test_err:
14ab9b86 3639 writel(save_val, bp->regview + offset);
b6016b76
MC
3640 ret = -ENODEV;
3641 break;
3642 }
3643 return ret;
3644}
3645
3646static int
3647bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3648{
f71e1309 3649 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
b6016b76
MC
3650 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3651 int i;
3652
3653 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3654 u32 offset;
3655
3656 for (offset = 0; offset < size; offset += 4) {
3657
3658 REG_WR_IND(bp, start + offset, test_pattern[i]);
3659
3660 if (REG_RD_IND(bp, start + offset) !=
3661 test_pattern[i]) {
3662 return -ENODEV;
3663 }
3664 }
3665 }
3666 return 0;
3667}
3668
3669static int
3670bnx2_test_memory(struct bnx2 *bp)
3671{
3672 int ret = 0;
3673 int i;
f71e1309 3674 static const struct {
b6016b76
MC
3675 u32 offset;
3676 u32 len;
3677 } mem_tbl[] = {
3678 { 0x60000, 0x4000 },
5b0c76ad 3679 { 0xa0000, 0x3000 },
b6016b76
MC
3680 { 0xe0000, 0x4000 },
3681 { 0x120000, 0x4000 },
3682 { 0x1a0000, 0x4000 },
3683 { 0x160000, 0x4000 },
3684 { 0xffffffff, 0 },
3685 };
3686
3687 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3688 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3689 mem_tbl[i].len)) != 0) {
3690 return ret;
3691 }
3692 }
3693
3694 return ret;
3695}
3696
bc5a0690
MC
3697#define BNX2_MAC_LOOPBACK 0
3698#define BNX2_PHY_LOOPBACK 1
3699
b6016b76 3700static int
bc5a0690 3701bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
b6016b76
MC
3702{
3703 unsigned int pkt_size, num_pkts, i;
3704 struct sk_buff *skb, *rx_skb;
3705 unsigned char *packet;
bc5a0690
MC
3706 u16 rx_start_idx, rx_idx;
3707 u32 val;
b6016b76
MC
3708 dma_addr_t map;
3709 struct tx_bd *txbd;
3710 struct sw_bd *rx_buf;
3711 struct l2_fhdr *rx_hdr;
3712 int ret = -ENODEV;
3713
bc5a0690
MC
3714 if (loopback_mode == BNX2_MAC_LOOPBACK) {
3715 bp->loopback = MAC_LOOPBACK;
3716 bnx2_set_mac_loopback(bp);
3717 }
3718 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
3719 bp->loopback = 0;
3720 bnx2_set_phy_loopback(bp);
3721 }
3722 else
3723 return -EINVAL;
b6016b76
MC
3724
3725 pkt_size = 1514;
3726 skb = dev_alloc_skb(pkt_size);
b6cbc3b6
JL
3727 if (!skb)
3728 return -ENOMEM;
b6016b76
MC
3729 packet = skb_put(skb, pkt_size);
3730 memcpy(packet, bp->mac_addr, 6);
3731 memset(packet + 6, 0x0, 8);
3732 for (i = 14; i < pkt_size; i++)
3733 packet[i] = (unsigned char) (i & 0xff);
3734
3735 map = pci_map_single(bp->pdev, skb->data, pkt_size,
3736 PCI_DMA_TODEVICE);
3737
3738 val = REG_RD(bp, BNX2_HC_COMMAND);
3739 REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3740 REG_RD(bp, BNX2_HC_COMMAND);
3741
3742 udelay(5);
3743 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
3744
b6016b76
MC
3745 num_pkts = 0;
3746
bc5a0690 3747 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
b6016b76
MC
3748
3749 txbd->tx_bd_haddr_hi = (u64) map >> 32;
3750 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
3751 txbd->tx_bd_mss_nbytes = pkt_size;
3752 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
3753
3754 num_pkts++;
bc5a0690
MC
3755 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
3756 bp->tx_prod_bseq += pkt_size;
b6016b76 3757
bc5a0690
MC
3758 REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, bp->tx_prod);
3759 REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
b6016b76
MC
3760
3761 udelay(100);
3762
3763 val = REG_RD(bp, BNX2_HC_COMMAND);
3764 REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3765 REG_RD(bp, BNX2_HC_COMMAND);
3766
3767 udelay(5);
3768
3769 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
3770 dev_kfree_skb_irq(skb);
3771
bc5a0690 3772 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
b6016b76
MC
3773 goto loopback_test_done;
3774 }
3775
3776 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
3777 if (rx_idx != rx_start_idx + num_pkts) {
3778 goto loopback_test_done;
3779 }
3780
3781 rx_buf = &bp->rx_buf_ring[rx_start_idx];
3782 rx_skb = rx_buf->skb;
3783
3784 rx_hdr = (struct l2_fhdr *) rx_skb->data;
3785 skb_reserve(rx_skb, bp->rx_offset);
3786
3787 pci_dma_sync_single_for_cpu(bp->pdev,
3788 pci_unmap_addr(rx_buf, mapping),
3789 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
3790
ade2bfe7 3791 if (rx_hdr->l2_fhdr_status &
b6016b76
MC
3792 (L2_FHDR_ERRORS_BAD_CRC |
3793 L2_FHDR_ERRORS_PHY_DECODE |
3794 L2_FHDR_ERRORS_ALIGNMENT |
3795 L2_FHDR_ERRORS_TOO_SHORT |
3796 L2_FHDR_ERRORS_GIANT_FRAME)) {
3797
3798 goto loopback_test_done;
3799 }
3800
3801 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
3802 goto loopback_test_done;
3803 }
3804
3805 for (i = 14; i < pkt_size; i++) {
3806 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
3807 goto loopback_test_done;
3808 }
3809 }
3810
3811 ret = 0;
3812
3813loopback_test_done:
3814 bp->loopback = 0;
3815 return ret;
3816}
3817
bc5a0690
MC
3818#define BNX2_MAC_LOOPBACK_FAILED 1
3819#define BNX2_PHY_LOOPBACK_FAILED 2
3820#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
3821 BNX2_PHY_LOOPBACK_FAILED)
3822
3823static int
3824bnx2_test_loopback(struct bnx2 *bp)
3825{
3826 int rc = 0;
3827
3828 if (!netif_running(bp->dev))
3829 return BNX2_LOOPBACK_FAILED;
3830
3831 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
3832 spin_lock_bh(&bp->phy_lock);
3833 bnx2_init_phy(bp);
3834 spin_unlock_bh(&bp->phy_lock);
3835 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
3836 rc |= BNX2_MAC_LOOPBACK_FAILED;
3837 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
3838 rc |= BNX2_PHY_LOOPBACK_FAILED;
3839 return rc;
3840}
3841
b6016b76
MC
3842#define NVRAM_SIZE 0x200
3843#define CRC32_RESIDUAL 0xdebb20e3
3844
3845static int
3846bnx2_test_nvram(struct bnx2 *bp)
3847{
3848 u32 buf[NVRAM_SIZE / 4];
3849 u8 *data = (u8 *) buf;
3850 int rc = 0;
3851 u32 magic, csum;
3852
3853 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
3854 goto test_nvram_done;
3855
3856 magic = be32_to_cpu(buf[0]);
3857 if (magic != 0x669955aa) {
3858 rc = -ENODEV;
3859 goto test_nvram_done;
3860 }
3861
3862 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
3863 goto test_nvram_done;
3864
3865 csum = ether_crc_le(0x100, data);
3866 if (csum != CRC32_RESIDUAL) {
3867 rc = -ENODEV;
3868 goto test_nvram_done;
3869 }
3870
3871 csum = ether_crc_le(0x100, data + 0x100);
3872 if (csum != CRC32_RESIDUAL) {
3873 rc = -ENODEV;
3874 }
3875
3876test_nvram_done:
3877 return rc;
3878}
3879
3880static int
3881bnx2_test_link(struct bnx2 *bp)
3882{
3883 u32 bmsr;
3884
c770a65c 3885 spin_lock_bh(&bp->phy_lock);
b6016b76
MC
3886 bnx2_read_phy(bp, MII_BMSR, &bmsr);
3887 bnx2_read_phy(bp, MII_BMSR, &bmsr);
c770a65c 3888 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
3889
3890 if (bmsr & BMSR_LSTATUS) {
3891 return 0;
3892 }
3893 return -ENODEV;
3894}
3895
3896static int
3897bnx2_test_intr(struct bnx2 *bp)
3898{
3899 int i;
3900 u32 val;
3901 u16 status_idx;
3902
3903 if (!netif_running(bp->dev))
3904 return -ENODEV;
3905
3906 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
3907
3908 /* This register is not touched during run-time. */
3909 val = REG_RD(bp, BNX2_HC_COMMAND);
3910 REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW);
3911 REG_RD(bp, BNX2_HC_COMMAND);
3912
3913 for (i = 0; i < 10; i++) {
3914 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
3915 status_idx) {
3916
3917 break;
3918 }
3919
3920 msleep_interruptible(10);
3921 }
3922 if (i < 10)
3923 return 0;
3924
3925 return -ENODEV;
3926}
3927
3928static void
3929bnx2_timer(unsigned long data)
3930{
3931 struct bnx2 *bp = (struct bnx2 *) data;
3932 u32 msg;
3933
cd339a0e
MC
3934 if (!netif_running(bp->dev))
3935 return;
3936
b6016b76
MC
3937 if (atomic_read(&bp->intr_sem) != 0)
3938 goto bnx2_restart_timer;
3939
3940 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
e3648b3d 3941 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
b6016b76
MC
3942
3943 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
3944 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
b6016b76 3945
c770a65c 3946 spin_lock(&bp->phy_lock);
b6016b76
MC
3947 if (bp->serdes_an_pending) {
3948 bp->serdes_an_pending--;
3949 }
3950 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
3951 u32 bmcr;
3952
cd339a0e
MC
3953 bp->current_interval = bp->timer_interval;
3954
b6016b76
MC
3955 bnx2_read_phy(bp, MII_BMCR, &bmcr);
3956
3957 if (bmcr & BMCR_ANENABLE) {
3958 u32 phy1, phy2;
3959
3960 bnx2_write_phy(bp, 0x1c, 0x7c00);
3961 bnx2_read_phy(bp, 0x1c, &phy1);
3962
3963 bnx2_write_phy(bp, 0x17, 0x0f01);
3964 bnx2_read_phy(bp, 0x15, &phy2);
3965 bnx2_write_phy(bp, 0x17, 0x0f01);
3966 bnx2_read_phy(bp, 0x15, &phy2);
3967
3968 if ((phy1 & 0x10) && /* SIGNAL DETECT */
3969 !(phy2 & 0x20)) { /* no CONFIG */
3970
3971 bmcr &= ~BMCR_ANENABLE;
3972 bmcr |= BMCR_SPEED1000 |
3973 BMCR_FULLDPLX;
3974 bnx2_write_phy(bp, MII_BMCR, bmcr);
3975 bp->phy_flags |=
3976 PHY_PARALLEL_DETECT_FLAG;
3977 }
3978 }
3979 }
3980 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
3981 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
3982 u32 phy2;
3983
3984 bnx2_write_phy(bp, 0x17, 0x0f01);
3985 bnx2_read_phy(bp, 0x15, &phy2);
3986 if (phy2 & 0x20) {
3987 u32 bmcr;
3988
3989 bnx2_read_phy(bp, MII_BMCR, &bmcr);
3990 bmcr |= BMCR_ANENABLE;
3991 bnx2_write_phy(bp, MII_BMCR, bmcr);
3992
3993 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
3994
3995 }
3996 }
cd339a0e
MC
3997 else
3998 bp->current_interval = bp->timer_interval;
b6016b76 3999
c770a65c 4000 spin_unlock(&bp->phy_lock);
b6016b76
MC
4001 }
4002
4003bnx2_restart_timer:
cd339a0e 4004 mod_timer(&bp->timer, jiffies + bp->current_interval);
b6016b76
MC
4005}
4006
4007/* Called with rtnl_lock */
4008static int
4009bnx2_open(struct net_device *dev)
4010{
972ec0d4 4011 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
4012 int rc;
4013
829ca9a3 4014 bnx2_set_power_state(bp, PCI_D0);
b6016b76
MC
4015 bnx2_disable_int(bp);
4016
4017 rc = bnx2_alloc_mem(bp);
4018 if (rc)
4019 return rc;
4020
4021 if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
4022 (CHIP_ID(bp) != CHIP_ID_5706_A1) &&
4023 !disable_msi) {
4024
4025 if (pci_enable_msi(bp->pdev) == 0) {
4026 bp->flags |= USING_MSI_FLAG;
4027 rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
4028 dev);
4029 }
4030 else {
4031 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4032 SA_SHIRQ, dev->name, dev);
4033 }
4034 }
4035 else {
4036 rc = request_irq(bp->pdev->irq, bnx2_interrupt, SA_SHIRQ,
4037 dev->name, dev);
4038 }
4039 if (rc) {
4040 bnx2_free_mem(bp);
4041 return rc;
4042 }
4043
4044 rc = bnx2_init_nic(bp);
4045
4046 if (rc) {
4047 free_irq(bp->pdev->irq, dev);
4048 if (bp->flags & USING_MSI_FLAG) {
4049 pci_disable_msi(bp->pdev);
4050 bp->flags &= ~USING_MSI_FLAG;
4051 }
4052 bnx2_free_skbs(bp);
4053 bnx2_free_mem(bp);
4054 return rc;
4055 }
4056
cd339a0e 4057 mod_timer(&bp->timer, jiffies + bp->current_interval);
b6016b76
MC
4058
4059 atomic_set(&bp->intr_sem, 0);
4060
4061 bnx2_enable_int(bp);
4062
4063 if (bp->flags & USING_MSI_FLAG) {
4064 /* Test MSI to make sure it is working
4065 * If MSI test fails, go back to INTx mode
4066 */
4067 if (bnx2_test_intr(bp) != 0) {
4068 printk(KERN_WARNING PFX "%s: No interrupt was generated"
4069 " using MSI, switching to INTx mode. Please"
4070 " report this failure to the PCI maintainer"
4071 " and include system chipset information.\n",
4072 bp->dev->name);
4073
4074 bnx2_disable_int(bp);
4075 free_irq(bp->pdev->irq, dev);
4076 pci_disable_msi(bp->pdev);
4077 bp->flags &= ~USING_MSI_FLAG;
4078
4079 rc = bnx2_init_nic(bp);
4080
4081 if (!rc) {
4082 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4083 SA_SHIRQ, dev->name, dev);
4084 }
4085 if (rc) {
4086 bnx2_free_skbs(bp);
4087 bnx2_free_mem(bp);
4088 del_timer_sync(&bp->timer);
4089 return rc;
4090 }
4091 bnx2_enable_int(bp);
4092 }
4093 }
4094 if (bp->flags & USING_MSI_FLAG) {
4095 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
4096 }
4097
4098 netif_start_queue(dev);
4099
4100 return 0;
4101}
4102
4103static void
4104bnx2_reset_task(void *data)
4105{
4106 struct bnx2 *bp = data;
4107
afdc08b9
MC
4108 if (!netif_running(bp->dev))
4109 return;
4110
4111 bp->in_reset_task = 1;
b6016b76
MC
4112 bnx2_netif_stop(bp);
4113
4114 bnx2_init_nic(bp);
4115
4116 atomic_set(&bp->intr_sem, 1);
4117 bnx2_netif_start(bp);
afdc08b9 4118 bp->in_reset_task = 0;
b6016b76
MC
4119}
4120
4121static void
4122bnx2_tx_timeout(struct net_device *dev)
4123{
972ec0d4 4124 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
4125
4126 /* This allows the netif to be shutdown gracefully before resetting */
4127 schedule_work(&bp->reset_task);
4128}
4129
4130#ifdef BCM_VLAN
4131/* Called with rtnl_lock */
4132static void
4133bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4134{
972ec0d4 4135 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
4136
4137 bnx2_netif_stop(bp);
4138
4139 bp->vlgrp = vlgrp;
4140 bnx2_set_rx_mode(dev);
4141
4142 bnx2_netif_start(bp);
4143}
4144
4145/* Called with rtnl_lock */
4146static void
4147bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4148{
972ec0d4 4149 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
4150
4151 bnx2_netif_stop(bp);
4152
4153 if (bp->vlgrp)
4154 bp->vlgrp->vlan_devices[vid] = NULL;
4155 bnx2_set_rx_mode(dev);
4156
4157 bnx2_netif_start(bp);
4158}
4159#endif
4160
4161/* Called with dev->xmit_lock.
4162 * hard_start_xmit is pseudo-lockless - a lock is only required when
4163 * the tx queue is full. This way, we get the benefit of lockless
4164 * operations most of the time without the complexities to handle
4165 * netif_stop_queue/wake_queue race conditions.
4166 */
4167static int
4168bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4169{
972ec0d4 4170 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
4171 dma_addr_t mapping;
4172 struct tx_bd *txbd;
4173 struct sw_bd *tx_buf;
4174 u32 len, vlan_tag_flags, last_frag, mss;
4175 u16 prod, ring_prod;
4176 int i;
4177
e89bbf10 4178 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
b6016b76
MC
4179 netif_stop_queue(dev);
4180 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4181 dev->name);
4182
4183 return NETDEV_TX_BUSY;
4184 }
4185 len = skb_headlen(skb);
4186 prod = bp->tx_prod;
4187 ring_prod = TX_RING_IDX(prod);
4188
4189 vlan_tag_flags = 0;
4190 if (skb->ip_summed == CHECKSUM_HW) {
4191 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4192 }
4193
4194 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4195 vlan_tag_flags |=
4196 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4197 }
4198#ifdef BCM_TSO
4199 if ((mss = skb_shinfo(skb)->tso_size) &&
4200 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4201 u32 tcp_opt_len, ip_tcp_len;
4202
4203 if (skb_header_cloned(skb) &&
4204 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4205 dev_kfree_skb(skb);
4206 return NETDEV_TX_OK;
4207 }
4208
4209 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4210 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4211
4212 tcp_opt_len = 0;
4213 if (skb->h.th->doff > 5) {
4214 tcp_opt_len = (skb->h.th->doff - 5) << 2;
4215 }
4216 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4217
4218 skb->nh.iph->check = 0;
4219 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
4220 skb->h.th->check =
4221 ~csum_tcpudp_magic(skb->nh.iph->saddr,
4222 skb->nh.iph->daddr,
4223 0, IPPROTO_TCP, 0);
4224
4225 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4226 vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4227 (tcp_opt_len >> 2)) << 8;
4228 }
4229 }
4230 else
4231#endif
4232 {
4233 mss = 0;
4234 }
4235
4236 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4237
4238 tx_buf = &bp->tx_buf_ring[ring_prod];
4239 tx_buf->skb = skb;
4240 pci_unmap_addr_set(tx_buf, mapping, mapping);
4241
4242 txbd = &bp->tx_desc_ring[ring_prod];
4243
4244 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4245 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4246 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4247 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4248
4249 last_frag = skb_shinfo(skb)->nr_frags;
4250
4251 for (i = 0; i < last_frag; i++) {
4252 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4253
4254 prod = NEXT_TX_BD(prod);
4255 ring_prod = TX_RING_IDX(prod);
4256 txbd = &bp->tx_desc_ring[ring_prod];
4257
4258 len = frag->size;
4259 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4260 len, PCI_DMA_TODEVICE);
4261 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4262 mapping, mapping);
4263
4264 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4265 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4266 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4267 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4268
4269 }
4270 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4271
4272 prod = NEXT_TX_BD(prod);
4273 bp->tx_prod_bseq += skb->len;
4274
b6016b76
MC
4275 REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod);
4276 REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
4277
4278 mmiowb();
4279
4280 bp->tx_prod = prod;
4281 dev->trans_start = jiffies;
4282
e89bbf10 4283 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
c770a65c 4284 spin_lock(&bp->tx_lock);
e89bbf10
MC
4285 netif_stop_queue(dev);
4286
4287 if (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)
4288 netif_wake_queue(dev);
c770a65c 4289 spin_unlock(&bp->tx_lock);
b6016b76
MC
4290 }
4291
4292 return NETDEV_TX_OK;
4293}
4294
4295/* Called with rtnl_lock */
4296static int
4297bnx2_close(struct net_device *dev)
4298{
972ec0d4 4299 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
4300 u32 reset_code;
4301
afdc08b9
MC
4302 /* Calling flush_scheduled_work() may deadlock because
4303 * linkwatch_event() may be on the workqueue and it will try to get
4304 * the rtnl_lock which we are holding.
4305 */
4306 while (bp->in_reset_task)
4307 msleep(1);
4308
b6016b76
MC
4309 bnx2_netif_stop(bp);
4310 del_timer_sync(&bp->timer);
dda1e390
MC
4311 if (bp->flags & NO_WOL_FLAG)
4312 reset_code = BNX2_DRV_MSG_CODE_UNLOAD;
4313 else if (bp->wol)
b6016b76
MC
4314 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4315 else
4316 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4317 bnx2_reset_chip(bp, reset_code);
4318 free_irq(bp->pdev->irq, dev);
4319 if (bp->flags & USING_MSI_FLAG) {
4320 pci_disable_msi(bp->pdev);
4321 bp->flags &= ~USING_MSI_FLAG;
4322 }
4323 bnx2_free_skbs(bp);
4324 bnx2_free_mem(bp);
4325 bp->link_up = 0;
4326 netif_carrier_off(bp->dev);
829ca9a3 4327 bnx2_set_power_state(bp, PCI_D3hot);
b6016b76
MC
4328 return 0;
4329}
4330
/* Fold a 64-bit hardware counter (stored as a _hi/_lo register pair)
 * into an unsigned long.  The whole expansion is parenthesized: the
 * previous form ended in a bare '+', which is unsafe when the macro
 * is embedded in a larger expression.
 */
#define GET_NET_STATS64(ctr)					\
	((unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	 (unsigned long) (ctr##_lo))

/* On 32-bit longs only the low word can be represented. */
#define GET_NET_STATS32(ctr)	\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
4343
4344static struct net_device_stats *
4345bnx2_get_stats(struct net_device *dev)
4346{
972ec0d4 4347 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
4348 struct statistics_block *stats_blk = bp->stats_blk;
4349 struct net_device_stats *net_stats = &bp->net_stats;
4350
4351 if (bp->stats_blk == NULL) {
4352 return net_stats;
4353 }
4354 net_stats->rx_packets =
4355 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
4356 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
4357 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
4358
4359 net_stats->tx_packets =
4360 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
4361 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
4362 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
4363
4364 net_stats->rx_bytes =
4365 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
4366
4367 net_stats->tx_bytes =
4368 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
4369
4370 net_stats->multicast =
4371 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
4372
4373 net_stats->collisions =
4374 (unsigned long) stats_blk->stat_EtherStatsCollisions;
4375
4376 net_stats->rx_length_errors =
4377 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
4378 stats_blk->stat_EtherStatsOverrsizePkts);
4379
4380 net_stats->rx_over_errors =
4381 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
4382
4383 net_stats->rx_frame_errors =
4384 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
4385
4386 net_stats->rx_crc_errors =
4387 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
4388
4389 net_stats->rx_errors = net_stats->rx_length_errors +
4390 net_stats->rx_over_errors + net_stats->rx_frame_errors +
4391 net_stats->rx_crc_errors;
4392
4393 net_stats->tx_aborted_errors =
4394 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
4395 stats_blk->stat_Dot3StatsLateCollisions);
4396
5b0c76ad
MC
4397 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
4398 (CHIP_ID(bp) == CHIP_ID_5708_A0))
b6016b76
MC
4399 net_stats->tx_carrier_errors = 0;
4400 else {
4401 net_stats->tx_carrier_errors =
4402 (unsigned long)
4403 stats_blk->stat_Dot3StatsCarrierSenseErrors;
4404 }
4405
4406 net_stats->tx_errors =
4407 (unsigned long)
4408 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
4409 +
4410 net_stats->tx_aborted_errors +
4411 net_stats->tx_carrier_errors;
4412
4413 return net_stats;
4414}
4415
4416/* All ethtool functions called with rtnl_lock */
4417
4418static int
4419bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4420{
972ec0d4 4421 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
4422
4423 cmd->supported = SUPPORTED_Autoneg;
4424 if (bp->phy_flags & PHY_SERDES_FLAG) {
4425 cmd->supported |= SUPPORTED_1000baseT_Full |
4426 SUPPORTED_FIBRE;
4427
4428 cmd->port = PORT_FIBRE;
4429 }
4430 else {
4431 cmd->supported |= SUPPORTED_10baseT_Half |
4432 SUPPORTED_10baseT_Full |
4433 SUPPORTED_100baseT_Half |
4434 SUPPORTED_100baseT_Full |
4435 SUPPORTED_1000baseT_Full |
4436 SUPPORTED_TP;
4437
4438 cmd->port = PORT_TP;
4439 }
4440
4441 cmd->advertising = bp->advertising;
4442
4443 if (bp->autoneg & AUTONEG_SPEED) {
4444 cmd->autoneg = AUTONEG_ENABLE;
4445 }
4446 else {
4447 cmd->autoneg = AUTONEG_DISABLE;
4448 }
4449
4450 if (netif_carrier_ok(dev)) {
4451 cmd->speed = bp->line_speed;
4452 cmd->duplex = bp->duplex;
4453 }
4454 else {
4455 cmd->speed = -1;
4456 cmd->duplex = -1;
4457 }
4458
4459 cmd->transceiver = XCVR_INTERNAL;
4460 cmd->phy_address = bp->phy_addr;
4461
4462 return 0;
4463}
4464
4465static int
4466bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4467{
972ec0d4 4468 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
4469 u8 autoneg = bp->autoneg;
4470 u8 req_duplex = bp->req_duplex;
4471 u16 req_line_speed = bp->req_line_speed;
4472 u32 advertising = bp->advertising;
4473
4474 if (cmd->autoneg == AUTONEG_ENABLE) {
4475 autoneg |= AUTONEG_SPEED;
4476
4477 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
4478
4479 /* allow advertising 1 speed */
4480 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
4481 (cmd->advertising == ADVERTISED_10baseT_Full) ||
4482 (cmd->advertising == ADVERTISED_100baseT_Half) ||
4483 (cmd->advertising == ADVERTISED_100baseT_Full)) {
4484
4485 if (bp->phy_flags & PHY_SERDES_FLAG)
4486 return -EINVAL;
4487
4488 advertising = cmd->advertising;
4489
4490 }
4491 else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
4492 advertising = cmd->advertising;
4493 }
4494 else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
4495 return -EINVAL;
4496 }
4497 else {
4498 if (bp->phy_flags & PHY_SERDES_FLAG) {
4499 advertising = ETHTOOL_ALL_FIBRE_SPEED;
4500 }
4501 else {
4502 advertising = ETHTOOL_ALL_COPPER_SPEED;
4503 }
4504 }
4505 advertising |= ADVERTISED_Autoneg;
4506 }
4507 else {
4508 if (bp->phy_flags & PHY_SERDES_FLAG) {
4509 if ((cmd->speed != SPEED_1000) ||
4510 (cmd->duplex != DUPLEX_FULL)) {
4511 return -EINVAL;
4512 }
4513 }
4514 else if (cmd->speed == SPEED_1000) {
4515 return -EINVAL;
4516 }
4517 autoneg &= ~AUTONEG_SPEED;
4518 req_line_speed = cmd->speed;
4519 req_duplex = cmd->duplex;
4520 advertising = 0;
4521 }
4522
4523 bp->autoneg = autoneg;
4524 bp->advertising = advertising;
4525 bp->req_line_speed = req_line_speed;
4526 bp->req_duplex = req_duplex;
4527
c770a65c 4528 spin_lock_bh(&bp->phy_lock);
b6016b76
MC
4529
4530 bnx2_setup_phy(bp);
4531
c770a65c 4532 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
4533
4534 return 0;
4535}
4536
4537static void
4538bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4539{
972ec0d4 4540 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
4541
4542 strcpy(info->driver, DRV_MODULE_NAME);
4543 strcpy(info->version, DRV_MODULE_VERSION);
4544 strcpy(info->bus_info, pci_name(bp->pdev));
4545 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4546 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4547 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
206cc83c
MC
4548 info->fw_version[1] = info->fw_version[3] = '.';
4549 info->fw_version[5] = 0;
b6016b76
MC
4550}
4551
244ac4f4
MC
#define BNX2_REGDUMP_LEN		(32 * 1024)

/* ethtool regs_len hook: size of the register dump buffer. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
4559
/* ethtool register dump.  The 32 KB dump buffer is zero-filled and then
 * populated only with the readable register ranges; reg_boundaries[]
 * lists [start, end) pairs of readable windows — reads inside the holes
 * between windows are skipped (the corresponding buffer bytes stay 0).
 */
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	/* Consecutive pairs: readable range start, then the first offset
	 * past it; the final 0x8000 sentinel terminates the walk.
	 */
	u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
				 0x0800, 0x0880, 0x0c00, 0x0c10,
				 0x0c30, 0x0d08, 0x1000, 0x101c,
				 0x1040, 0x1048, 0x1080, 0x10a4,
				 0x1400, 0x1490, 0x1498, 0x14f0,
				 0x1500, 0x155c, 0x1580, 0x15dc,
				 0x1600, 0x1658, 0x1680, 0x16d8,
				 0x1800, 0x1820, 0x1840, 0x1854,
				 0x1880, 0x1894, 0x1900, 0x1984,
				 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
				 0x1c80, 0x1c94, 0x1d00, 0x1d84,
				 0x2000, 0x2030, 0x23c0, 0x2400,
				 0x2800, 0x2820, 0x2830, 0x2850,
				 0x2b40, 0x2c10, 0x2fc0, 0x3058,
				 0x3c00, 0x3c94, 0x4000, 0x4010,
				 0x4080, 0x4090, 0x43c0, 0x4458,
				 0x4c00, 0x4c18, 0x4c40, 0x4c54,
				 0x4fc0, 0x5010, 0x53c0, 0x5444,
				 0x5c00, 0x5c18, 0x5c80, 0x5c90,
				 0x5fc0, 0x6000, 0x6400, 0x6428,
				 0x6800, 0x6848, 0x684c, 0x6860,
				 0x6888, 0x6910, 0x8000 };

	regs->version = 0;

	memset(p, 0, BNX2_REGDUMP_LEN);

	/* Registers are only safe to read while the chip is running. */
	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	/* First boundary is 0, so this is a no-op; kept for symmetry with
	 * the re-seek done when a hole is crossed below.
	 */
	p += offset;
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = REG_RD(bp, offset);
		offset += 4;
		/* End of a readable window: jump to the next window start
		 * and re-seek the output pointer so buffer position always
		 * mirrors the register offset.
		 */
		if (offset == reg_boundaries[i + 1]) {
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}
4609
b6016b76
MC
4610static void
4611bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4612{
972ec0d4 4613 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
4614
4615 if (bp->flags & NO_WOL_FLAG) {
4616 wol->supported = 0;
4617 wol->wolopts = 0;
4618 }
4619 else {
4620 wol->supported = WAKE_MAGIC;
4621 if (bp->wol)
4622 wol->wolopts = WAKE_MAGIC;
4623 else
4624 wol->wolopts = 0;
4625 }
4626 memset(&wol->sopass, 0, sizeof(wol->sopass));
4627}
4628
4629static int
4630bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4631{
972ec0d4 4632 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
4633
4634 if (wol->wolopts & ~WAKE_MAGIC)
4635 return -EINVAL;
4636
4637 if (wol->wolopts & WAKE_MAGIC) {
4638 if (bp->flags & NO_WOL_FLAG)
4639 return -EINVAL;
4640
4641 bp->wol = 1;
4642 }
4643 else {
4644 bp->wol = 0;
4645 }
4646 return 0;
4647}
4648
/* ethtool -r: restart autonegotiation.  Only valid when autoneg speed
 * is enabled.  For SERDES PHYs the link is first forced down (loopback)
 * so the peer notices the renegotiation.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
		/* Drop the lock around the sleep; msleep cannot be called
		 * with a BH-disabling spinlock held.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);
		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
			/* 5706 SERDES: arm the autoneg timeout workaround
			 * handled by the periodic timer.
			 */
			bp->current_interval = SERDES_AN_TIMEOUT;
			bp->serdes_an_pending = 1;
			mod_timer(&bp->timer, jiffies + bp->current_interval);
		}
	}

	/* Clear loopback and kick off a new autonegotiation cycle. */
	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
4684
4685static int
4686bnx2_get_eeprom_len(struct net_device *dev)
4687{
972ec0d4 4688 struct bnx2 *bp = netdev_priv(dev);
b6016b76 4689
1122db71 4690 if (bp->flash_info == NULL)
b6016b76
MC
4691 return 0;
4692
1122db71 4693 return (int) bp->flash_size;
b6016b76
MC
4694}
4695
4696static int
4697bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4698 u8 *eebuf)
4699{
972ec0d4 4700 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
4701 int rc;
4702
1064e944 4703 /* parameters already validated in ethtool_get_eeprom */
b6016b76
MC
4704
4705 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
4706
4707 return rc;
4708}
4709
4710static int
4711bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4712 u8 *eebuf)
4713{
972ec0d4 4714 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
4715 int rc;
4716
1064e944 4717 /* parameters already validated in ethtool_set_eeprom */
b6016b76
MC
4718
4719 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
4720
4721 return rc;
4722}
4723
/* ethtool -c: report the current interrupt coalescing parameters.
 * Fields not supported by this chip are left zero by the memset.
 */
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}
4745
/* ethtool -C: set interrupt coalescing parameters.  Each value is
 * silently clamped to the hardware field width (tick counters are
 * 10-bit, frame counters 8-bit; stats ticks use the upper 16 bits with
 * the low byte masked off).  The NIC is re-initialized to apply them
 * if it is currently running.
 */
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
	bp->stats_ticks &= 0xffff00;

	/* Restart the NIC so the new values are programmed into the chip. */
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
4789
/* ethtool -g: report ring sizes.  Mini and jumbo rings do not exist on
 * this hardware, so their limits and current sizes are reported as 0.
 */
static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_DESC_CNT;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_DESC_CNT;
	ering->tx_pending = bp->tx_ring_size;
}
4806
4807static int
4808bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
4809{
972ec0d4 4810 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
4811
4812 if ((ering->rx_pending > MAX_RX_DESC_CNT) ||
4813 (ering->tx_pending > MAX_TX_DESC_CNT) ||
4814 (ering->tx_pending <= MAX_SKB_FRAGS)) {
4815
4816 return -EINVAL;
4817 }
4818 bp->rx_ring_size = ering->rx_pending;
4819 bp->tx_ring_size = ering->tx_pending;
4820
4821 if (netif_running(bp->dev)) {
4822 bnx2_netif_stop(bp);
4823 bnx2_init_nic(bp);
4824 bnx2_netif_start(bp);
4825 }
4826
4827 return 0;
4828}
4829
/* ethtool -a: report flow control autoneg state and the currently
 * negotiated RX/TX pause settings.
 */
static void
bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
}
4839
/* ethtool -A: set requested flow control and whether it is negotiated,
 * then reprogram the PHY (under phy_lock) to apply the change.
 */
static int
bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->req_flow_ctrl = 0;
	if (epause->rx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_RX;
	if (epause->tx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_TX;

	if (epause->autoneg) {
		bp->autoneg |= AUTONEG_FLOW_CTRL;
	}
	else {
		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
	}

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
4866
4867static u32
4868bnx2_get_rx_csum(struct net_device *dev)
4869{
972ec0d4 4870 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
4871
4872 return bp->rx_csum;
4873}
4874
4875static int
4876bnx2_set_rx_csum(struct net_device *dev, u32 data)
4877{
972ec0d4 4878 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
4879
4880 bp->rx_csum = data;
4881 return 0;
4882}
4883
/* Number of statistics exported through ethtool.  Must stay in sync
 * with the three parallel tables: bnx2_stats_str_arr (names),
 * bnx2_stats_offset_arr (hw offsets) and the *_stats_len_arr tables
 * (counter widths).
 */
#define BNX2_NUM_STATS 45

/* Statistic names reported for ETH_SS_STATS, in display order. */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
};
4935
/* Convert a statistics_block member offset (bytes) into a 32-bit word
 * index, as used by bnx2_get_ethtool_stats() to index the DMA'd block.
 */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Word offset of each counter in the hardware statistics block;
 * parallel to bnx2_stats_str_arr.  64-bit counters point at their
 * _hi word, with the _lo word immediately following.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
};
4985
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Per-counter widths (bytes) on 5706/early-5708 silicon; a width of 0
 * means "skip this counter" (errata above).  Parallel to
 * bnx2_stats_str_arr.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,
};
4996
/* Per-counter widths (bytes) on 5708 silicon (no skipped counters).
 * Parallel to bnx2_stats_str_arr.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,
};
5004
b6016b76
MC
/* Number of self-tests; must match bnx2_tests_str_arr and the buf[]
 * slots filled in by bnx2_self_test().
 */
#define BNX2_NUM_TESTS 6

/* Self-test names reported for ETH_SS_TEST, in execution order. */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
5017
5018static int
5019bnx2_self_test_count(struct net_device *dev)
5020{
5021 return BNX2_NUM_TESTS;
5022}
5023
/* ethtool self-test.  Offline tests (register/memory/loopback) require
 * a chip reset into diagnostic mode; online tests (nvram/interrupt/
 * link) run against the live chip.  buf[i] is nonzero on failure of
 * test i (indices match bnx2_tests_str_arr).
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		/* Take the chip offline and into diag mode; all pending
		 * skbs are freed because the rings are about to be reset.
		 */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* Loopback returns a bitmask of failed loopback modes. */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore the chip: full re-init if the interface is up,
		 * otherwise just a plain reset.
		 */
		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		msleep_interruptible(3000);
		/* Copper PHYs can take longer to renegotiate. */
		if ((!bp->link_up) && !(bp->phy_flags & PHY_SERDES_FLAG))
			msleep_interruptible(4000);
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
5075
5076static void
5077bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5078{
5079 switch (stringset) {
5080 case ETH_SS_STATS:
5081 memcpy(buf, bnx2_stats_str_arr,
5082 sizeof(bnx2_stats_str_arr));
5083 break;
5084 case ETH_SS_TEST:
5085 memcpy(buf, bnx2_tests_str_arr,
5086 sizeof(bnx2_tests_str_arr));
5087 break;
5088 }
5089}
5090
5091static int
5092bnx2_get_stats_count(struct net_device *dev)
5093{
5094 return BNX2_NUM_STATS;
5095}
5096
/* ethtool -S: copy counters out of the DMA'd hardware statistics block.
 * The per-chip length table selects 4- vs 8-byte counters and marks
 * errata-skipped counters with length 0 (reported as 0).
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	/* Stats block not allocated (interface never opened): report 0s. */
	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	/* Early silicon steppings have counter errata; use the table
	 * that skips the affected counters.
	 */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter: _hi word first, _lo word follows. */
		buf[i] = (((u64) *(hw_stats +
					bnx2_stats_offset_arr[i])) << 32) +
				*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
5137
/* ethtool -p: blink the port LEDs to identify the adapter.
 * @data: number of seconds to blink (0 means the default of 2).
 * Toggles all LED overrides on/off at 500 ms intervals, then restores
 * the saved LED mode.  Interruptible by a pending signal.
 */
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	/* Put the LEDs under MAC (software) control for the duration. */
	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	/* Two half-periods per second of blinking. */
	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
	/* Restore normal LED operation. */
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}
5171
/* ethtool entry points for this driver; generic ethtool_op_* helpers
 * are used where no device-specific behavior is needed.
 */
static struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
#ifdef BCM_TSO
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= ethtool_op_set_tso,
#endif
	.self_test_count	= bnx2_self_test_count,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_stats_count	= bnx2_get_stats_count,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
5209
/* Called with rtnl_lock */
/* MII ioctl handler: exposes the PHY registers to userspace (mii-tool).
 * Register accesses are serialized with the rest of the driver via
 * phy_lock.  Writes require CAP_NET_ADMIN.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
5251
5252/* Called with rtnl_lock */
5253static int
5254bnx2_change_mac_addr(struct net_device *dev, void *p)
5255{
5256 struct sockaddr *addr = p;
972ec0d4 5257 struct bnx2 *bp = netdev_priv(dev);
b6016b76 5258
73eef4cd
MC
5259 if (!is_valid_ether_addr(addr->sa_data))
5260 return -EINVAL;
5261
b6016b76
MC
5262 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5263 if (netif_running(dev))
5264 bnx2_set_mac_addr(bp);
5265
5266 return 0;
5267}
5268
5269/* Called with rtnl_lock */
5270static int
5271bnx2_change_mtu(struct net_device *dev, int new_mtu)
5272{
972ec0d4 5273 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5274
5275 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5276 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5277 return -EINVAL;
5278
5279 dev->mtu = new_mtu;
5280 if (netif_running(dev)) {
5281 bnx2_netif_stop(bp);
5282
5283 bnx2_init_nic(bp);
5284
5285 bnx2_netif_start(bp);
5286 }
5287 return 0;
5288}
5289
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook (netconsole/kgdboe): run the interrupt handler by hand
 * with the device IRQ masked, since interrupts may be disabled.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev, NULL);
	enable_irq(bp->pdev->irq);
}
#endif
5301
5302static int __devinit
5303bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5304{
5305 struct bnx2 *bp;
5306 unsigned long mem_len;
5307 int rc;
5308 u32 reg;
5309
5310 SET_MODULE_OWNER(dev);
5311 SET_NETDEV_DEV(dev, &pdev->dev);
972ec0d4 5312 bp = netdev_priv(dev);
b6016b76
MC
5313
5314 bp->flags = 0;
5315 bp->phy_flags = 0;
5316
5317 /* enable device (incl. PCI PM wakeup), and bus-mastering */
5318 rc = pci_enable_device(pdev);
5319 if (rc) {
5320 printk(KERN_ERR PFX "Cannot enable PCI device, aborting.");
5321 goto err_out;
5322 }
5323
5324 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5325 printk(KERN_ERR PFX "Cannot find PCI device base address, "
5326 "aborting.\n");
5327 rc = -ENODEV;
5328 goto err_out_disable;
5329 }
5330
5331 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5332 if (rc) {
5333 printk(KERN_ERR PFX "Cannot obtain PCI resources, aborting.\n");
5334 goto err_out_disable;
5335 }
5336
5337 pci_set_master(pdev);
5338
5339 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
5340 if (bp->pm_cap == 0) {
5341 printk(KERN_ERR PFX "Cannot find power management capability, "
5342 "aborting.\n");
5343 rc = -EIO;
5344 goto err_out_release;
5345 }
5346
5347 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
5348 if (bp->pcix_cap == 0) {
5349 printk(KERN_ERR PFX "Cannot find PCIX capability, aborting.\n");
5350 rc = -EIO;
5351 goto err_out_release;
5352 }
5353
5354 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
5355 bp->flags |= USING_DAC_FLAG;
5356 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
5357 printk(KERN_ERR PFX "pci_set_consistent_dma_mask "
5358 "failed, aborting.\n");
5359 rc = -EIO;
5360 goto err_out_release;
5361 }
5362 }
5363 else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
5364 printk(KERN_ERR PFX "System does not support DMA, aborting.\n");
5365 rc = -EIO;
5366 goto err_out_release;
5367 }
5368
5369 bp->dev = dev;
5370 bp->pdev = pdev;
5371
5372 spin_lock_init(&bp->phy_lock);
5373 spin_lock_init(&bp->tx_lock);
5374 INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
5375
5376 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
5377 mem_len = MB_GET_CID_ADDR(17);
5378 dev->mem_end = dev->mem_start + mem_len;
5379 dev->irq = pdev->irq;
5380
5381 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
5382
5383 if (!bp->regview) {
5384 printk(KERN_ERR PFX "Cannot map register space, aborting.\n");
5385 rc = -ENOMEM;
5386 goto err_out_release;
5387 }
5388
5389 /* Configure byte swap and enable write to the reg_window registers.
5390 * Rely on CPU to do target byte swapping on big endian systems
5391 * The chip's target access swapping will not swap all accesses
5392 */
5393 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
5394 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5395 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5396
829ca9a3 5397 bnx2_set_power_state(bp, PCI_D0);
b6016b76
MC
5398
5399 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
5400
b6016b76
MC
5401 /* Get bus information. */
5402 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
5403 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
5404 u32 clkreg;
5405
5406 bp->flags |= PCIX_FLAG;
5407
5408 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
5409
5410 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
5411 switch (clkreg) {
5412 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
5413 bp->bus_speed_mhz = 133;
5414 break;
5415
5416 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
5417 bp->bus_speed_mhz = 100;
5418 break;
5419
5420 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
5421 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
5422 bp->bus_speed_mhz = 66;
5423 break;
5424
5425 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
5426 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
5427 bp->bus_speed_mhz = 50;
5428 break;
5429
5430 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
5431 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
5432 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
5433 bp->bus_speed_mhz = 33;
5434 break;
5435 }
5436 }
5437 else {
5438 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
5439 bp->bus_speed_mhz = 66;
5440 else
5441 bp->bus_speed_mhz = 33;
5442 }
5443
5444 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
5445 bp->flags |= PCI_32BIT_FLAG;
5446
5447 /* 5706A0 may falsely detect SERR and PERR. */
5448 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5449 reg = REG_RD(bp, PCI_COMMAND);
5450 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
5451 REG_WR(bp, PCI_COMMAND, reg);
5452 }
5453 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
5454 !(bp->flags & PCIX_FLAG)) {
5455
5456 printk(KERN_ERR PFX "5706 A1 can only be used in a PCIX bus, "
5457 "aborting.\n");
5458 goto err_out_unmap;
5459 }
5460
5461 bnx2_init_nvram(bp);
5462
e3648b3d
MC
5463 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
5464
5465 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
5466 BNX2_SHM_HDR_SIGNATURE_SIG)
5467 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
5468 else
5469 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
5470
b6016b76
MC
5471 /* Get the permanent MAC address. First we need to make sure the
5472 * firmware is actually running.
5473 */
e3648b3d 5474 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
b6016b76
MC
5475
5476 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
5477 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
5478 printk(KERN_ERR PFX "Firmware not running, aborting.\n");
5479 rc = -ENODEV;
5480 goto err_out_unmap;
5481 }
5482
e3648b3d 5483 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
b6016b76 5484
e3648b3d 5485 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
b6016b76
MC
5486 bp->mac_addr[0] = (u8) (reg >> 8);
5487 bp->mac_addr[1] = (u8) reg;
5488
e3648b3d 5489 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
b6016b76
MC
5490 bp->mac_addr[2] = (u8) (reg >> 24);
5491 bp->mac_addr[3] = (u8) (reg >> 16);
5492 bp->mac_addr[4] = (u8) (reg >> 8);
5493 bp->mac_addr[5] = (u8) reg;
5494
5495 bp->tx_ring_size = MAX_TX_DESC_CNT;
5496 bp->rx_ring_size = 100;
5497
5498 bp->rx_csum = 1;
5499
5500 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
5501
5502 bp->tx_quick_cons_trip_int = 20;
5503 bp->tx_quick_cons_trip = 20;
5504 bp->tx_ticks_int = 80;
5505 bp->tx_ticks = 80;
5506
5507 bp->rx_quick_cons_trip_int = 6;
5508 bp->rx_quick_cons_trip = 6;
5509 bp->rx_ticks_int = 18;
5510 bp->rx_ticks = 18;
5511
5512 bp->stats_ticks = 1000000 & 0xffff00;
5513
5514 bp->timer_interval = HZ;
cd339a0e 5515 bp->current_interval = HZ;
b6016b76 5516
5b0c76ad
MC
5517 bp->phy_addr = 1;
5518
b6016b76
MC
5519 /* Disable WOL support if we are running on a SERDES chip. */
5520 if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
5521 bp->phy_flags |= PHY_SERDES_FLAG;
5522 bp->flags |= NO_WOL_FLAG;
5b0c76ad
MC
5523 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5524 bp->phy_addr = 2;
e3648b3d 5525 reg = REG_RD_IND(bp, bp->shmem_base +
5b0c76ad
MC
5526 BNX2_SHARED_HW_CFG_CONFIG);
5527 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
5528 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
5529 }
b6016b76
MC
5530 }
5531
dda1e390
MC
5532 if (CHIP_NUM(bp) == CHIP_NUM_5708)
5533 bp->flags |= NO_WOL_FLAG;
5534
b6016b76
MC
5535 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5536 bp->tx_quick_cons_trip_int =
5537 bp->tx_quick_cons_trip;
5538 bp->tx_ticks_int = bp->tx_ticks;
5539 bp->rx_quick_cons_trip_int =
5540 bp->rx_quick_cons_trip;
5541 bp->rx_ticks_int = bp->rx_ticks;
5542 bp->comp_prod_trip_int = bp->comp_prod_trip;
5543 bp->com_ticks_int = bp->com_ticks;
5544 bp->cmd_ticks_int = bp->cmd_ticks;
5545 }
5546
5547 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
5548 bp->req_line_speed = 0;
5549 if (bp->phy_flags & PHY_SERDES_FLAG) {
5550 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
cd339a0e 5551
e3648b3d 5552 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
cd339a0e
MC
5553 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
5554 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
5555 bp->autoneg = 0;
5556 bp->req_line_speed = bp->line_speed = SPEED_1000;
5557 bp->req_duplex = DUPLEX_FULL;
5558 }
b6016b76
MC
5559 }
5560 else {
5561 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
5562 }
5563
5564 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
5565
cd339a0e
MC
5566 init_timer(&bp->timer);
5567 bp->timer.expires = RUN_AT(bp->timer_interval);
5568 bp->timer.data = (unsigned long) bp;
5569 bp->timer.function = bnx2_timer;
5570
b6016b76
MC
5571 return 0;
5572
5573err_out_unmap:
5574 if (bp->regview) {
5575 iounmap(bp->regview);
73eef4cd 5576 bp->regview = NULL;
b6016b76
MC
5577 }
5578
5579err_out_release:
5580 pci_release_regions(pdev);
5581
5582err_out_disable:
5583 pci_disable_device(pdev);
5584 pci_set_drvdata(pdev, NULL);
5585
5586err_out:
5587 return rc;
5588}
5589
5590static int __devinit
5591bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5592{
5593 static int version_printed = 0;
5594 struct net_device *dev = NULL;
5595 struct bnx2 *bp;
5596 int rc, i;
5597
5598 if (version_printed++ == 0)
5599 printk(KERN_INFO "%s", version);
5600
5601 /* dev zeroed in init_etherdev */
5602 dev = alloc_etherdev(sizeof(*bp));
5603
5604 if (!dev)
5605 return -ENOMEM;
5606
5607 rc = bnx2_init_board(pdev, dev);
5608 if (rc < 0) {
5609 free_netdev(dev);
5610 return rc;
5611 }
5612
5613 dev->open = bnx2_open;
5614 dev->hard_start_xmit = bnx2_start_xmit;
5615 dev->stop = bnx2_close;
5616 dev->get_stats = bnx2_get_stats;
5617 dev->set_multicast_list = bnx2_set_rx_mode;
5618 dev->do_ioctl = bnx2_ioctl;
5619 dev->set_mac_address = bnx2_change_mac_addr;
5620 dev->change_mtu = bnx2_change_mtu;
5621 dev->tx_timeout = bnx2_tx_timeout;
5622 dev->watchdog_timeo = TX_TIMEOUT;
5623#ifdef BCM_VLAN
5624 dev->vlan_rx_register = bnx2_vlan_rx_register;
5625 dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
5626#endif
5627 dev->poll = bnx2_poll;
5628 dev->ethtool_ops = &bnx2_ethtool_ops;
5629 dev->weight = 64;
5630
972ec0d4 5631 bp = netdev_priv(dev);
b6016b76
MC
5632
5633#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5634 dev->poll_controller = poll_bnx2;
5635#endif
5636
5637 if ((rc = register_netdev(dev))) {
5638 printk(KERN_ERR PFX "Cannot register net device\n");
5639 if (bp->regview)
5640 iounmap(bp->regview);
5641 pci_release_regions(pdev);
5642 pci_disable_device(pdev);
5643 pci_set_drvdata(pdev, NULL);
5644 free_netdev(dev);
5645 return rc;
5646 }
5647
5648 pci_set_drvdata(pdev, dev);
5649
5650 memcpy(dev->dev_addr, bp->mac_addr, 6);
24b8e05d 5651 memcpy(dev->perm_addr, bp->mac_addr, 6);
b6016b76
MC
5652 bp->name = board_info[ent->driver_data].name,
5653 printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
5654 "IRQ %d, ",
5655 dev->name,
5656 bp->name,
5657 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
5658 ((CHIP_ID(bp) & 0x0ff0) >> 4),
5659 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
5660 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
5661 bp->bus_speed_mhz,
5662 dev->base_addr,
5663 bp->pdev->irq);
5664
5665 printk("node addr ");
5666 for (i = 0; i < 6; i++)
5667 printk("%2.2x", dev->dev_addr[i]);
5668 printk("\n");
5669
5670 dev->features |= NETIF_F_SG;
5671 if (bp->flags & USING_DAC_FLAG)
5672 dev->features |= NETIF_F_HIGHDMA;
5673 dev->features |= NETIF_F_IP_CSUM;
5674#ifdef BCM_VLAN
5675 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5676#endif
5677#ifdef BCM_TSO
5678 dev->features |= NETIF_F_TSO;
5679#endif
5680
5681 netif_carrier_off(bp->dev);
5682
5683 return 0;
5684}
5685
/* Tear down one device: stop deferred driver work, unhook from the network
 * stack, unmap BAR memory and release the PCI resources.  Order matters:
 * pending work must be flushed before the netdev goes away, and the netdev
 * is freed before the PCI regions are released.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Wait for any driver work still queued on the shared workqueue. */
	flush_scheduled_work();

	unregister_netdev(dev);

	/* regview may be NULL if mapping failed during probe. */
	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
5704
5705static int
829ca9a3 5706bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
b6016b76
MC
5707{
5708 struct net_device *dev = pci_get_drvdata(pdev);
972ec0d4 5709 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5710 u32 reset_code;
5711
5712 if (!netif_running(dev))
5713 return 0;
5714
5715 bnx2_netif_stop(bp);
5716 netif_device_detach(dev);
5717 del_timer_sync(&bp->timer);
dda1e390
MC
5718 if (bp->flags & NO_WOL_FLAG)
5719 reset_code = BNX2_DRV_MSG_CODE_UNLOAD;
5720 else if (bp->wol)
b6016b76
MC
5721 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5722 else
5723 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5724 bnx2_reset_chip(bp, reset_code);
5725 bnx2_free_skbs(bp);
829ca9a3 5726 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
b6016b76
MC
5727 return 0;
5728}
5729
/* Power-management resume hook.  Powers the chip back to D0 and fully
 * re-initializes it; all ring and hardware state was discarded on suspend,
 * so bnx2_init_nic() rebuilds it from scratch before traffic restarts.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* If the interface was down at suspend time there is nothing to restore. */
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
5745
/* PCI driver glue: probe/remove plus power-management entry points,
 * matched against bnx2_pci_tbl.
 */
static struct pci_driver bnx2_pci_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = bnx2_pci_tbl,
	.probe = bnx2_init_one,
	.remove = __devexit_p(bnx2_remove_one),
	.suspend = bnx2_suspend,
	.resume = bnx2_resume,
};
5754
/* Module load entry point: register the PCI driver with the core. */
static int __init bnx2_init(void)
{
	return pci_module_init(&bnx2_pci_driver);
}
5759
/* Module unload entry point: unregister the PCI driver (triggers
 * bnx2_remove_one() for every bound device).
 */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
5764
/* Hook the load/unload entry points into the module infrastructure. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
5767
5768
5769