]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/net/bnx2.c
[BNX2]: Move tx indexes into bnx2_napi struct.
[mirror_ubuntu-bionic-kernel.git] / drivers / net / bnx2.c
CommitLineData
b6016b76
MC
1/* bnx2.c: Broadcom NX2 network driver.
2 *
72fbaeb6 3 * Copyright (c) 2004-2007 Broadcom Corporation
b6016b76
MC
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
f2a4f052
MC
12
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15
16#include <linux/kernel.h>
17#include <linux/timer.h>
18#include <linux/errno.h>
19#include <linux/ioport.h>
20#include <linux/slab.h>
21#include <linux/vmalloc.h>
22#include <linux/interrupt.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/skbuff.h>
28#include <linux/dma-mapping.h>
1977f032 29#include <linux/bitops.h>
f2a4f052
MC
30#include <asm/io.h>
31#include <asm/irq.h>
32#include <linux/delay.h>
33#include <asm/byteorder.h>
c86a31f4 34#include <asm/page.h>
f2a4f052
MC
35#include <linux/time.h>
36#include <linux/ethtool.h>
37#include <linux/mii.h>
38#ifdef NETIF_F_HW_VLAN_TX
39#include <linux/if_vlan.h>
40#define BCM_VLAN 1
41#endif
f2a4f052 42#include <net/ip.h>
de081fa5 43#include <net/tcp.h>
f2a4f052 44#include <net/checksum.h>
f2a4f052
MC
45#include <linux/workqueue.h>
46#include <linux/crc32.h>
47#include <linux/prefetch.h>
29b12174 48#include <linux/cache.h>
fba9fe91 49#include <linux/zlib.h>
f2a4f052 50
b6016b76
MC
51#include "bnx2.h"
52#include "bnx2_fw.h"
d43584c8 53#include "bnx2_fw2.h"
b6016b76 54
/* Staging buffer size for decompressing firmware images (64 kB). */
#define FW_BUF_SIZE		0x10000

#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.7.0"
#define DRV_MODULE_RELDATE	"December 11, 2007"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: set non-zero to force legacy INTx instead of MSI. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
/* Board types; used as the index into board_info[] below and carried
 * as driver_data in bnx2_pci_tbl. */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
} board_t;
91
/* Marketing names for each board, indexed by board_t, above. */
static const struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	};
106
/* PCI IDs this driver claims.  HP OEM boards (HP subvendor IDs) are
 * listed before the catch-all Broadcom entries so they match first. */
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ 0, }
};
128
/* NVRAM access parameters for every supported flash/EEPROM part.
 * The leading words of each entry are matched against the hardware
 * strapping to pick the right entry at probe time (see the NVRAM init
 * code).  "Expansion" entries are placeholders for strapping values
 * with no dedicated part. */
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
217
e30372c9
MC
/* The 5709 has a single fixed NVRAM configuration, so it does not use
 * the strapping-matched flash_table above. */
static struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};
226
b6016b76
MC
/* Export the PCI ID table so udev/modprobe can autoload the driver. */
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
228
/* Return the number of free tx descriptors.
 * NOTE(review): the smp_mb() appears to order this read of
 * tx_prod/tx_cons against updates made on another CPU — confirm against
 * the producer/consumer paths, which are outside this chunk. */
static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = bp->tx_prod - bnapi->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}
246
b6016b76
MC
/* Read a chip register indirectly through the PCICFG register window.
 * The address write and data read must not interleave with other
 * indirect accesses, hence indirect_lock. */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}
258
/* Write a chip register indirectly through the PCICFG register window;
 * serialized against other indirect accesses by indirect_lock. */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
267
268static void
269bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
270{
271 offset += cid_addr;
1b8227c4 272 spin_lock_bh(&bp->indirect_lock);
59b47d8a
MC
273 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
274 int i;
275
276 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
277 REG_WR(bp, BNX2_CTX_CTX_CTRL,
278 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
279 for (i = 0; i < 5; i++) {
280 u32 val;
281 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
282 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
283 break;
284 udelay(5);
285 }
286 } else {
287 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
288 REG_WR(bp, BNX2_CTX_DATA, val);
289 }
1b8227c4 290 spin_unlock_bh(&bp->indirect_lock);
b6016b76
MC
291}
292
/* Read PHY register 'reg' over the EMAC MDIO interface into *val.
 * If hardware auto-polling of the PHY is active it is suspended for
 * the duration of the manual transaction and restored afterwards.
 * Returns 0 on success, or -EBUSY (with *val = 0) if the MDIO
 * transaction does not complete within the polling window. */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush the write */

		udelay(40);
	}

	/* Kick off the read transaction. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to 50 x 10 usec for BUSY to clear. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush the write */

		udelay(40);
	}

	return ret;
}
349
/* Write 'val' to PHY register 'reg' over the EMAC MDIO interface.
 * Mirrors bnx2_read_phy(): auto-polling is suspended around the manual
 * transaction.  Returns 0 on success or -EBUSY on MDIO timeout. */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush the write */

		udelay(40);
	}

	/* Kick off the write transaction. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to 50 x 10 usec for BUSY to clear. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush the write */

		udelay(40);
	}

	return ret;
}
398
/* Mask chip interrupts; the read back flushes the posted write. */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
406
/* Unmask chip interrupts.  The first write acknowledges up to
 * last_status_idx while still masked; the second write unmasks.  The
 * COAL_NOW command then forces the host coalescing block to generate
 * an interrupt immediately if events are already pending. */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bnapi->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bnapi->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
421
/* Mask interrupts and wait for any in-flight handler to finish.
 * intr_sem is bumped so bnx2_netif_start() only re-enables once the
 * matching decrement brings it back to zero. */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
429
35efa7c1
MC
430static void
431bnx2_napi_disable(struct bnx2 *bp)
432{
433 napi_disable(&bp->bnx2_napi.napi);
434}
435
436static void
437bnx2_napi_enable(struct bnx2 *bp)
438{
439 napi_enable(&bp->bnx2_napi.napi);
440}
441
b6016b76
MC
/* Quiesce the interface: mask interrupts (waiting for any running
 * handler), stop NAPI polling, and freeze the tx queue.  trans_start
 * is refreshed so the netdev watchdog does not mistake the frozen
 * queue for a tx timeout. */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies; /* prevent tx timeout */
	}
}
452
/* Undo bnx2_netif_stop().  Only the call that brings intr_sem back to
 * zero actually restarts the queue, NAPI, and interrupts, so nested
 * stop/start pairs balance correctly. */
static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
		}
	}
}
464
465static void
466bnx2_free_mem(struct bnx2 *bp)
467{
13daffa2
MC
468 int i;
469
59b47d8a
MC
470 for (i = 0; i < bp->ctx_pages; i++) {
471 if (bp->ctx_blk[i]) {
472 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
473 bp->ctx_blk[i],
474 bp->ctx_blk_mapping[i]);
475 bp->ctx_blk[i] = NULL;
476 }
477 }
b6016b76 478 if (bp->status_blk) {
0f31f994 479 pci_free_consistent(bp->pdev, bp->status_stats_size,
b6016b76
MC
480 bp->status_blk, bp->status_blk_mapping);
481 bp->status_blk = NULL;
0f31f994 482 bp->stats_blk = NULL;
b6016b76
MC
483 }
484 if (bp->tx_desc_ring) {
e343d55c 485 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
b6016b76
MC
486 bp->tx_desc_ring, bp->tx_desc_mapping);
487 bp->tx_desc_ring = NULL;
488 }
b4558ea9
JJ
489 kfree(bp->tx_buf_ring);
490 bp->tx_buf_ring = NULL;
13daffa2
MC
491 for (i = 0; i < bp->rx_max_ring; i++) {
492 if (bp->rx_desc_ring[i])
e343d55c 493 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
13daffa2
MC
494 bp->rx_desc_ring[i],
495 bp->rx_desc_mapping[i]);
496 bp->rx_desc_ring[i] = NULL;
497 }
498 vfree(bp->rx_buf_ring);
b4558ea9 499 bp->rx_buf_ring = NULL;
47bf4246
MC
500 for (i = 0; i < bp->rx_max_pg_ring; i++) {
501 if (bp->rx_pg_desc_ring[i])
502 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
503 bp->rx_pg_desc_ring[i],
504 bp->rx_pg_desc_mapping[i]);
505 bp->rx_pg_desc_ring[i] = NULL;
506 }
507 if (bp->rx_pg_ring)
508 vfree(bp->rx_pg_ring);
509 bp->rx_pg_ring = NULL;
b6016b76
MC
510}
511
/* Allocate all host memory for the device: tx ring, rx ring(s) (plus
 * the optional page ring), the combined status + statistics block, and
 * 5709 context pages.  On any failure everything already allocated is
 * released through bnx2_free_mem().  Returns 0 or -ENOMEM. */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	/* Software rx ring is large (rx_max_ring pages), so vmalloc. */
	bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Optional page ring (used for jumbo/split rx buffers). */
	if (bp->rx_pg_ring_size) {
		bp->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
					 bp->rx_max_pg_ring);
		if (bp->rx_pg_ring == NULL)
			goto alloc_mem_err;

		memset(bp->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
		       bp->rx_max_pg_ring);
	}

	for (i = 0; i < bp->rx_max_pg_ring; i++) {
		bp->rx_pg_desc_ring[i] =
			pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
					     &bp->rx_pg_desc_mapping[i]);
		if (bp->rx_pg_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	bp->bnx2_napi.status_blk = bp->status_blk;

	/* Statistics block starts right after the cache-aligned status
	 * block within the same DMA allocation. */
	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 8 kB of host-backed context memory in page-size chunks. */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
597
e3648b3d
MC
/* Report the resolved link state (speed/duplex/autoneg status) to the
 * bootcode via the BNX2_LINK_STATUS word in shared memory.  Skipped
 * entirely when the PHY is managed remotely. */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* Read BMSR twice; presumably to clear the latched
			 * link bit and get current status — standard MII
			 * practice, confirm against the PHY datasheet. */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
656
9b1084b8
MC
657static char *
658bnx2_xceiver_str(struct bnx2 *bp)
659{
660 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
661 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
662 "Copper"));
663}
664
b6016b76
MC
/* Log the current link state and propagate carrier on/off to the
 * network stack, then mirror the state to firmware.  The message is
 * assembled from multiple printk() continuations. */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
701
/* Resolve bp->flow_ctrl from the negotiated (or forced) link settings.
 * Forced mode uses req_flow_ctrl directly; the 5708 SerDes reports the
 * resolved pause state in a status register; otherwise the local and
 * remote pause advertisements are compared per IEEE 802.3ab Table
 * 28B-3.  Pause only applies on full duplex links. */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* 5708 SerDes: the hardware reports the resolved pause state
	 * directly. */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* Translate 1000Base-X pause bits into the copper-style
	 * PAUSE_CAP/PAUSE_ASYM encoding so one resolution table works
	 * for both media. */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
777
27a005b8
MC
/* Record link-up parameters for the 5709 SerDes PHY.  Speed and duplex
 * come from the GP_STATUS block's TOP_AN_STATUS1 register unless the
 * link was forced, in which case the requested values are used.
 * Always returns 0. */
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	/* Read GP status, restoring the default register block after. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}
816
/* Record link-up parameters for the 5708 SerDes PHY from the 1000X
 * STAT1 register.  Always returns 0. */
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
845
/* Record link-up parameters for the 5706 SerDes PHY.  Line speed is
 * always 1000; duplex comes from BMCR when forced, or from the common
 * subset of the local and link-partner 1000Base-X advertisements when
 * autonegotiated.  Always returns 0. */
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
882
/* Record link-up parameters for a copper PHY.  With autoneg enabled,
 * the highest speed/duplex common to both ends wins: gigabit ability
 * registers are checked first (the partner's ability bits sit two
 * positions above the local ones, hence the >> 2), then the 10/100
 * advertisement registers.  Without autoneg, BMCR gives the forced
 * settings.  Always returns 0; clears link_up if nothing matches. */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
948
/* Program the EMAC to match the resolved link parameters: inter-packet
 * gap, port mode (MII/GMII/2.5G), duplex, and rx/tx pause enables.
 * Finishes by acknowledging the EMAC link-change interrupt.  Always
 * returns 0. */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Default inter-packet gap; 1000HD needs a larger setting. */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
1015
27a005b8
MC
/* On 5709 SerDes, switch the PHY register block to GP_STATUS before
 * reading mii_bmsr1; no-op on other PHYs. */
static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}
1024
/* Restore the default (COMBO_IEEEB0) PHY register block after a
 * bnx2_enable_bmsr1() window; no-op on other PHYs. */
static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}
1033
605a9e20
MC
/* Enable 2.5G advertisement in the PHY's UP1 register if not already
 * set (the 5709 needs the OVER1G register block selected first).
 * Returns 1 if 2.5G was already enabled (or the PHY is not 2.5G
 * capable... note the not-capable path returns 0), 0 if this call had
 * to turn it on. */
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1062
/* Clear the 2.5G enable bit in the PHY's UP1 register if it was set
 * (the 5709 needs the OVER1G register block selected first).
 * Returns 1 if this call actually disabled 2.5G, 0 otherwise. */
static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1088
/* Force the SerDes PHY to 2.5G operation (no autoneg).  On 5709 the
 * force bits live in the SERDES_DIG MISC1 register; on 5708 a BMCR
 * bit is used.  When speed autoneg was previously on, autoneg is
 * disabled and the requested duplex is forced as well.
 *
 * NOTE(review): bmcr is only assigned in the 5709/5708 branches; this
 * assumes PHY_2_5G_CAPABLE_FLAG is set only on those chips — appears
 * to hold in this driver, but confirm before reuse.
 */
1089static void
1090bnx2_enable_forced_2g5(struct bnx2 *bp)
1091{
1092 u32 bmcr;
1093
1094 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1095 return;
1096
27a005b8
MC
1097 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1098 u32 val;
1099
1100 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1101 MII_BNX2_BLK_ADDR_SERDES_DIG);
1102 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1103 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1104 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1105 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1106
1107 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1108 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1109 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1110
1111 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
605a9e20
MC
1112 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1113 bmcr |= BCM5708S_BMCR_FORCE_2500;
1114 }
1115
1116 if (bp->autoneg & AUTONEG_SPEED) {
1117 bmcr &= ~BMCR_ANENABLE;
1118 if (bp->req_duplex == DUPLEX_FULL)
1119 bmcr |= BMCR_FULLDPLX;
1120 }
1121 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1122}
1123
/* Undo bnx2_enable_forced_2g5(): clear the chip-specific 2.5G force
 * bits and, if speed autoneg is configured, re-enable and restart
 * autonegotiation at 1G.
 *
 * NOTE(review): as in bnx2_enable_forced_2g5(), bmcr is only set in
 * the 5709/5708 branches — assumes 2.5G capability implies one of
 * those chips.
 */
1124static void
1125bnx2_disable_forced_2g5(struct bnx2 *bp)
1126{
1127 u32 bmcr;
1128
1129 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1130 return;
1131
27a005b8
MC
1132 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1133 u32 val;
1134
1135 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1136 MII_BNX2_BLK_ADDR_SERDES_DIG);
1137 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1138 val &= ~MII_BNX2_SD_MISC1_FORCE;
1139 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1140
1141 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1142 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1143 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1144
1145 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
605a9e20
MC
1146 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1147 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1148 }
1149
1150 if (bp->autoneg & AUTONEG_SPEED)
1151 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1152 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1153}
1154
b6016b76
MC
/* Poll the PHY and bring the software link state (bp->link_up, speed,
 * duplex, flow control) in sync with it, then program the MAC
 * accordingly and report any link change.
 *
 * Loopback modes are always reported as link-up; remote-PHY (firmware
 * controlled) devices are handled via firmware events instead.
 */
1155static int
1156bnx2_set_link(struct bnx2 *bp)
1157{
1158 u32 bmsr;
1159 u8 link_up;
1160
80be4434 1161 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
b6016b76
MC
1162 bp->link_up = 1;
1163 return 0;
1164 }
1165
0d8a6571
MC
1166 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1167 return 0;
1168
b6016b76
MC
1169 link_up = bp->link_up;
1170
27a005b8
MC
/* BMSR link status is latched-low; read twice so the second read
 * reflects the current state. */
1171 bnx2_enable_bmsr1(bp);
1172 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1173 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1174 bnx2_disable_bmsr1(bp);
b6016b76
MC
1175
/* 5706 SerDes: trust the EMAC link status over the PHY's BMSR —
 * presumably a hardware quirk on this chip; TODO confirm. */
1176 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1177 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1178 u32 val;
1179
1180 val = REG_RD(bp, BNX2_EMAC_STATUS);
1181 if (val & BNX2_EMAC_STATUS_LINK)
1182 bmsr |= BMSR_LSTATUS;
1183 else
1184 bmsr &= ~BMSR_LSTATUS;
1185 }
1186
1187 if (bmsr & BMSR_LSTATUS) {
1188 bp->link_up = 1;
1189
/* Per-chip link-up handlers read back negotiated speed/duplex. */
1190 if (bp->phy_flags & PHY_SERDES_FLAG) {
5b0c76ad
MC
1191 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1192 bnx2_5706s_linkup(bp);
1193 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1194 bnx2_5708s_linkup(bp);
27a005b8
MC
1195 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1196 bnx2_5709s_linkup(bp);
b6016b76
MC
1197 }
1198 else {
1199 bnx2_copper_linkup(bp);
1200 }
1201 bnx2_resolve_flow_ctrl(bp);
1202 }
1203 else {
1204 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
605a9e20
MC
1205 (bp->autoneg & AUTONEG_SPEED))
1206 bnx2_disable_forced_2g5(bp);
b6016b76 1207
b6016b76
MC
1208 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1209 bp->link_up = 0;
1210 }
1211
1212 if (bp->link_up != link_up) {
1213 bnx2_report_link(bp);
1214 }
1215
1216 bnx2_set_mac_link(bp);
1217
1218 return 0;
1219}
1220
/* Soft-reset the PHY through BMCR and poll (up to ~1 ms) for the
 * self-clearing BMCR_RESET bit to drop.
 *
 * Returns 0 on success, -EBUSY if the PHY never comes out of reset.
 */
1221static int
1222bnx2_reset_phy(struct bnx2 *bp)
1223{
1224 int i;
1225 u32 reg;
1226
ca58c3af 1227 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
b6016b76
MC
1228
1229#define PHY_RESET_MAX_WAIT 100
1230 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1231 udelay(10);
1232
ca58c3af 1233 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
b6016b76
MC
1234 if (!(reg & BMCR_RESET)) {
/* Extra settle time after the reset bit clears. */
1235 udelay(20);
1236 break;
1237 }
1238 }
1239 if (i == PHY_RESET_MAX_WAIT) {
1240 return -EBUSY;
1241 }
1242 return 0;
1243}
1244
1245static u32
1246bnx2_phy_get_pause_adv(struct bnx2 *bp)
1247{
1248 u32 adv = 0;
1249
1250 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1251 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1252
1253 if (bp->phy_flags & PHY_SERDES_FLAG) {
1254 adv = ADVERTISE_1000XPAUSE;
1255 }
1256 else {
1257 adv = ADVERTISE_PAUSE_CAP;
1258 }
1259 }
1260 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1261 if (bp->phy_flags & PHY_SERDES_FLAG) {
1262 adv = ADVERTISE_1000XPSE_ASYM;
1263 }
1264 else {
1265 adv = ADVERTISE_PAUSE_ASYM;
1266 }
1267 }
1268 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1269 if (bp->phy_flags & PHY_SERDES_FLAG) {
1270 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1271 }
1272 else {
1273 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1274 }
1275 }
1276 return adv;
1277}
1278
0d8a6571
MC
1279static int bnx2_fw_sync(struct bnx2 *, u32, int);
1280
b6016b76 1281static int
0d8a6571
MC
1282bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1283{
1284 u32 speed_arg = 0, pause_adv;
1285
1286 pause_adv = bnx2_phy_get_pause_adv(bp);
1287
1288 if (bp->autoneg & AUTONEG_SPEED) {
1289 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1290 if (bp->advertising & ADVERTISED_10baseT_Half)
1291 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1292 if (bp->advertising & ADVERTISED_10baseT_Full)
1293 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1294 if (bp->advertising & ADVERTISED_100baseT_Half)
1295 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1296 if (bp->advertising & ADVERTISED_100baseT_Full)
1297 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1298 if (bp->advertising & ADVERTISED_1000baseT_Full)
1299 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1300 if (bp->advertising & ADVERTISED_2500baseX_Full)
1301 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1302 } else {
1303 if (bp->req_line_speed == SPEED_2500)
1304 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1305 else if (bp->req_line_speed == SPEED_1000)
1306 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1307 else if (bp->req_line_speed == SPEED_100) {
1308 if (bp->req_duplex == DUPLEX_FULL)
1309 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1310 else
1311 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1312 } else if (bp->req_line_speed == SPEED_10) {
1313 if (bp->req_duplex == DUPLEX_FULL)
1314 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1315 else
1316 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1317 }
1318 }
1319
1320 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1321 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1322 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_1000XPSE_ASYM))
1323 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1324
1325 if (port == PORT_TP)
1326 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1327 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1328
1329 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1330
1331 spin_unlock_bh(&bp->phy_lock);
1332 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1333 spin_lock_bh(&bp->phy_lock);
1334
1335 return 0;
1336}
1337
/* Configure a SerDes PHY according to the requested autoneg/forced
 * settings.  Forced-speed mode programs ADV/BMCR directly (forcing a
 * visible link-down first so the partner renegotiates); autoneg mode
 * rewrites the advertisement and restarts autonegotiation, arming a
 * short timer to detect non-autonegotiating partners.
 *
 * Remote-PHY devices are delegated to bnx2_setup_remote_phy().
 * Called with bp->phy_lock held (dropped briefly around msleep).
 */
1338static int
1339bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
b6016b76 1340{
605a9e20 1341 u32 adv, bmcr;
b6016b76
MC
1342 u32 new_adv = 0;
1343
0d8a6571
MC
1344 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1345 return (bnx2_setup_remote_phy(bp, port));
1346
b6016b76
MC
1347 if (!(bp->autoneg & AUTONEG_SPEED)) {
1348 u32 new_bmcr;
5b0c76ad
MC
1349 int force_link_down = 0;
1350
/* Switching the 2.5G capability on/off requires the link to be
 * renegotiated even if BMCR itself does not change. */
605a9e20
MC
1351 if (bp->req_line_speed == SPEED_2500) {
1352 if (!bnx2_test_and_enable_2g5(bp))
1353 force_link_down = 1;
1354 } else if (bp->req_line_speed == SPEED_1000) {
1355 if (bnx2_test_and_disable_2g5(bp))
1356 force_link_down = 1;
1357 }
ca58c3af 1358 bnx2_read_phy(bp, bp->mii_adv, &adv);
80be4434
MC
1359 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1360
ca58c3af 1361 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
605a9e20 1362 new_bmcr = bmcr & ~BMCR_ANENABLE;
80be4434 1363 new_bmcr |= BMCR_SPEED1000;
605a9e20 1364
27a005b8
MC
1365 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1366 if (bp->req_line_speed == SPEED_2500)
1367 bnx2_enable_forced_2g5(bp);
1368 else if (bp->req_line_speed == SPEED_1000) {
1369 bnx2_disable_forced_2g5(bp);
1370 new_bmcr &= ~0x2000;
1371 }
1372
1373 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
605a9e20
MC
1374 if (bp->req_line_speed == SPEED_2500)
1375 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1376 else
1377 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
5b0c76ad
MC
1378 }
1379
b6016b76 1380 if (bp->req_duplex == DUPLEX_FULL) {
5b0c76ad 1381 adv |= ADVERTISE_1000XFULL;
b6016b76
MC
1382 new_bmcr |= BMCR_FULLDPLX;
1383 }
1384 else {
5b0c76ad 1385 adv |= ADVERTISE_1000XHALF;
b6016b76
MC
1386 new_bmcr &= ~BMCR_FULLDPLX;
1387 }
5b0c76ad 1388 if ((new_bmcr != bmcr) || (force_link_down)) {
b6016b76
MC
1389 /* Force a link down visible on the other side */
1390 if (bp->link_up) {
ca58c3af 1391 bnx2_write_phy(bp, bp->mii_adv, adv &
5b0c76ad
MC
1392 ~(ADVERTISE_1000XFULL |
1393 ADVERTISE_1000XHALF));
ca58c3af 1394 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
b6016b76
MC
1395 BMCR_ANRESTART | BMCR_ANENABLE);
1396
1397 bp->link_up = 0;
1398 netif_carrier_off(bp->dev);
ca58c3af 1399 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
80be4434 1400 bnx2_report_link(bp);
b6016b76 1401 }
ca58c3af
MC
1402 bnx2_write_phy(bp, bp->mii_adv, adv);
1403 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
605a9e20
MC
1404 } else {
/* Nothing changed on the wire; just re-resolve flow control. */
1405 bnx2_resolve_flow_ctrl(bp);
1406 bnx2_set_mac_link(bp);
b6016b76
MC
1407 }
1408 return 0;
1409 }
1410
605a9e20 1411 bnx2_test_and_enable_2g5(bp);
5b0c76ad 1412
b6016b76
MC
1413 if (bp->advertising & ADVERTISED_1000baseT_Full)
1414 new_adv |= ADVERTISE_1000XFULL;
1415
1416 new_adv |= bnx2_phy_get_pause_adv(bp);
1417
ca58c3af
MC
1418 bnx2_read_phy(bp, bp->mii_adv, &adv);
1419 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76
MC
1420
1421 bp->serdes_an_pending = 0;
1422 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1423 /* Force a link down visible on the other side */
1424 if (bp->link_up) {
ca58c3af 1425 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
80be4434
MC
1426 spin_unlock_bh(&bp->phy_lock);
1427 msleep(20);
1428 spin_lock_bh(&bp->phy_lock);
b6016b76
MC
1429 }
1430
ca58c3af
MC
1431 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1432 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
b6016b76 1433 BMCR_ANENABLE);
f8dd064e
MC
1434 /* Speed up link-up time when the link partner
1435 * does not autonegotiate which is very common
1436 * in blade servers. Some blade servers use
1437 * IPMI for keyboard input and it's important
1438 * to minimize link disruptions. Autoneg. involves
1439 * exchanging base pages plus 3 next pages and
1440 * normally completes in about 120 msec.
1441 */
1442 bp->current_interval = SERDES_AN_TIMEOUT;
1443 bp->serdes_an_pending = 1;
1444 mod_timer(&bp->timer, jiffies + bp->current_interval);
605a9e20
MC
1445 } else {
1446 bnx2_resolve_flow_ctrl(bp);
1447 bnx2_set_mac_link(bp);
b6016b76
MC
1448 }
1449
1450 return 0;
1451}
1452
/* Advertisement masks for ethtool "advertise everything" defaults.
 * NOTE: ETHTOOL_ALL_FIBRE_SPEED expands an expression that reads the
 * local variable `bp`, so it may only be used where a `struct bnx2 *bp`
 * is in scope.
 */
1453#define ETHTOOL_ALL_FIBRE_SPEED \
deaf391b
MC
1454 (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ? \
1455 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1456 (ADVERTISED_1000baseT_Full)
b6016b76
MC
1457
1458#define ETHTOOL_ALL_COPPER_SPEED \
1459 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1460 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1461 ADVERTISED_1000baseT_Full)
1462
/* MII (register-level) advertisement masks, as opposed to the
 * ADVERTISED_* ethtool masks above. */
1463#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1464 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
6aa20a22 1465
b6016b76
MC
1466#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1467
1467
0d8a6571
MC
/* Load the default link configuration for a firmware-controlled
 * (remote) PHY from shared memory.  The firmware publishes a
 * NETLINK_SET_LINK-format word per media type; decode it into
 * bp->autoneg / bp->advertising (autoneg case) or
 * bp->req_line_speed / bp->req_duplex (forced case).
 */
1468static void
1469bnx2_set_default_remote_link(struct bnx2 *bp)
1470{
1471 u32 link;
1472
1473 if (bp->phy_port == PORT_TP)
1474 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
1475 else
1476 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);
1477
1478 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1479 bp->req_line_speed = 0;
1480 bp->autoneg |= AUTONEG_SPEED;
1481 bp->advertising = ADVERTISED_Autoneg;
1482 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1483 bp->advertising |= ADVERTISED_10baseT_Half;
1484 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1485 bp->advertising |= ADVERTISED_10baseT_Full;
1486 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1487 bp->advertising |= ADVERTISED_100baseT_Half;
1488 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1489 bp->advertising |= ADVERTISED_100baseT_Full;
1490 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1491 bp->advertising |= ADVERTISED_1000baseT_Full;
1492 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1493 bp->advertising |= ADVERTISED_2500baseX_Full;
1494 } else {
/* Forced mode: the highest speed bit present wins; half duplex
 * only applies to 10/100. */
1495 bp->autoneg = 0;
1496 bp->advertising = 0;
1497 bp->req_duplex = DUPLEX_FULL;
1498 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1499 bp->req_line_speed = SPEED_10;
1500 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1501 bp->req_duplex = DUPLEX_HALF;
1502 }
1503 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1504 bp->req_line_speed = SPEED_100;
1505 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1506 bp->req_duplex = DUPLEX_HALF;
1507 }
1508 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1509 bp->req_line_speed = SPEED_1000;
1510 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1511 bp->req_line_speed = SPEED_2500;
1512 }
1513}
1514
deaf391b
MC
/* Establish the driver's default link settings: full autoneg with all
 * supported speeds advertised, unless NVRAM (PORT_HW_CFG_CONFIG)
 * forces 1G on a SerDes port.  Remote-PHY devices take their defaults
 * from firmware shared memory instead.
 */
1515static void
1516bnx2_set_default_link(struct bnx2 *bp)
1517{
0d8a6571
MC
1518 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1519 return bnx2_set_default_remote_link(bp);
1520
deaf391b
MC
1521 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1522 bp->req_line_speed = 0;
1523 if (bp->phy_flags & PHY_SERDES_FLAG) {
1524 u32 reg;
1525
1526 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1527
1528 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1529 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1530 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1531 bp->autoneg = 0;
1532 bp->req_line_speed = bp->line_speed = SPEED_1000;
1533 bp->req_duplex = DUPLEX_FULL;
1534 }
1535 } else
1536 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1537}
1538
df149d70
MC
/* Send the periodic driver-alive pulse to the firmware by writing the
 * next pulse sequence number into the DRV_PULSE shared-memory mailbox.
 * The register window is programmed directly under indirect_lock
 * (rather than via REG_WR_IND) — presumably to keep the two window
 * writes atomic with respect to other indirect accesses.
 */
1539static void
1540bnx2_send_heart_beat(struct bnx2 *bp)
1541{
1542 u32 msg;
1543 u32 addr;
1544
1545 spin_lock(&bp->indirect_lock);
1546 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1547 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1548 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1549 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1550 spin_unlock(&bp->indirect_lock);
1551}
1552
0d8a6571
MC
/* Handle a link-status event from the management firmware (remote
 * PHY).  Decodes the LINK_STATUS shared-memory word into bp->link_up,
 * speed, duplex, flow control and media type, answers any pending
 * heart-beat request, then programs the MAC and reports link changes.
 */
1553static void
1554bnx2_remote_phy_event(struct bnx2 *bp)
1555{
1556 u32 msg;
1557 u8 link_up = bp->link_up;
1558 u8 old_port;
1559
1560 msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
1561
df149d70
MC
1562 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1563 bnx2_send_heart_beat(bp);
1564
1565 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1566
0d8a6571
MC
1567 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1568 bp->link_up = 0;
1569 else {
1570 u32 speed;
1571
1572 bp->link_up = 1;
1573 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1574 bp->duplex = DUPLEX_FULL;
/* Each xxxHALF case sets half duplex then falls through to the
 * matching speed assignment. */
1575 switch (speed) {
1576 case BNX2_LINK_STATUS_10HALF:
1577 bp->duplex = DUPLEX_HALF;
/* fall through */
1578 case BNX2_LINK_STATUS_10FULL:
1579 bp->line_speed = SPEED_10;
1580 break;
1581 case BNX2_LINK_STATUS_100HALF:
1582 bp->duplex = DUPLEX_HALF;
/* fall through */
1583 case BNX2_LINK_STATUS_100BASE_T4:
1584 case BNX2_LINK_STATUS_100FULL:
1585 bp->line_speed = SPEED_100;
1586 break;
1587 case BNX2_LINK_STATUS_1000HALF:
1588 bp->duplex = DUPLEX_HALF;
/* fall through */
1589 case BNX2_LINK_STATUS_1000FULL:
1590 bp->line_speed = SPEED_1000;
1591 break;
1592 case BNX2_LINK_STATUS_2500HALF:
1593 bp->duplex = DUPLEX_HALF;
/* fall through */
1594 case BNX2_LINK_STATUS_2500FULL:
1595 bp->line_speed = SPEED_2500;
1596 break;
1597 default:
1598 bp->line_speed = 0;
1599 break;
1600 }
1601
1602 spin_lock(&bp->phy_lock);
1603 bp->flow_ctrl = 0;
1604 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1605 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1606 if (bp->duplex == DUPLEX_FULL)
1607 bp->flow_ctrl = bp->req_flow_ctrl;
1608 } else {
1609 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
1610 bp->flow_ctrl |= FLOW_CTRL_TX;
1611 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
1612 bp->flow_ctrl |= FLOW_CTRL_RX;
1613 }
1614
/* If the firmware switched media (fibre <-> copper), reload the
 * per-media default link configuration. */
1615 old_port = bp->phy_port;
1616 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
1617 bp->phy_port = PORT_FIBRE;
1618 else
1619 bp->phy_port = PORT_TP;
1620
1621 if (old_port != bp->phy_port)
1622 bnx2_set_default_link(bp);
1623
1624 spin_unlock(&bp->phy_lock);
1625 }
1626 if (bp->link_up != link_up)
1627 bnx2_report_link(bp);
1628
1629 bnx2_set_mac_link(bp);
1630}
1631
/* Dispatch a firmware event for a remote-PHY device: link events go to
 * bnx2_remote_phy_event(); timer expirations (and anything unknown)
 * just answer with a heart beat.  Always returns 0.
 */
1632static int
1633bnx2_set_remote_link(struct bnx2 *bp)
1634{
1635 u32 evt_code;
1636
1637 evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1638 switch (evt_code) {
1639 case BNX2_FW_EVT_CODE_LINK_EVENT:
1640 bnx2_remote_phy_event(bp);
1641 break;
1642 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1643 default:
df149d70 1644 bnx2_send_heart_beat(bp);
0d8a6571
MC
1645 break;
1646 }
1647 return 0;
1648}
1649
b6016b76
MC
/* Configure a copper PHY.  In autoneg mode the 10/100 and 1000
 * advertisement registers are rewritten (restarting autoneg only when
 * something actually changed); in forced mode BMCR is programmed
 * directly, forcing a visible link drop first so the partner resyncs.
 *
 * Called with bp->phy_lock held (dropped briefly around msleep).
 * Always returns 0.
 */
1650static int
1651bnx2_setup_copper_phy(struct bnx2 *bp)
1652{
1653 u32 bmcr;
1654 u32 new_bmcr;
1655
ca58c3af 1656 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76
MC
1657
1658 if (bp->autoneg & AUTONEG_SPEED) {
1659 u32 adv_reg, adv1000_reg;
1660 u32 new_adv_reg = 0;
1661 u32 new_adv1000_reg = 0;
1662
ca58c3af 1663 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
b6016b76
MC
1664 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1665 ADVERTISE_PAUSE_ASYM);
1666
1667 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1668 adv1000_reg &= PHY_ALL_1000_SPEED;
1669
1670 if (bp->advertising & ADVERTISED_10baseT_Half)
1671 new_adv_reg |= ADVERTISE_10HALF;
1672 if (bp->advertising & ADVERTISED_10baseT_Full)
1673 new_adv_reg |= ADVERTISE_10FULL;
1674 if (bp->advertising & ADVERTISED_100baseT_Half)
1675 new_adv_reg |= ADVERTISE_100HALF;
1676 if (bp->advertising & ADVERTISED_100baseT_Full)
1677 new_adv_reg |= ADVERTISE_100FULL;
1678 if (bp->advertising & ADVERTISED_1000baseT_Full)
1679 new_adv1000_reg |= ADVERTISE_1000FULL;
6aa20a22 1680
b6016b76
MC
1681 new_adv_reg |= ADVERTISE_CSMA;
1682
1683 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1684
/* Only restart autoneg when the advertisement changed or autoneg
 * is currently disabled. */
1685 if ((adv1000_reg != new_adv1000_reg) ||
1686 (adv_reg != new_adv_reg) ||
1687 ((bmcr & BMCR_ANENABLE) == 0)) {
1688
ca58c3af 1689 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
b6016b76 1690 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
ca58c3af 1691 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
b6016b76
MC
1692 BMCR_ANENABLE);
1693 }
1694 else if (bp->link_up) {
1695 /* Flow ctrl may have changed from auto to forced */
1696 /* or vice-versa. */
1697
1698 bnx2_resolve_flow_ctrl(bp);
1699 bnx2_set_mac_link(bp);
1700 }
1701 return 0;
1702 }
1703
1704 new_bmcr = 0;
1705 if (bp->req_line_speed == SPEED_100) {
1706 new_bmcr |= BMCR_SPEED100;
1707 }
1708 if (bp->req_duplex == DUPLEX_FULL) {
1709 new_bmcr |= BMCR_FULLDPLX;
1710 }
1711 if (new_bmcr != bmcr) {
1712 u32 bmsr;
b6016b76 1713
/* BMSR latches link-down; read twice for current status. */
ca58c3af
MC
1714 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1715 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
6aa20a22 1716
b6016b76
MC
1717 if (bmsr & BMSR_LSTATUS) {
1718 /* Force link down */
ca58c3af 1719 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
a16dda0e
MC
1720 spin_unlock_bh(&bp->phy_lock);
1721 msleep(50);
1722 spin_lock_bh(&bp->phy_lock);
1723
ca58c3af
MC
1724 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1725 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
b6016b76
MC
1726 }
1727
ca58c3af 1728 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
b6016b76
MC
1729
1730 /* Normally, the new speed is setup after the link has
1731 * gone down and up again. In some cases, link will not go
1732 * down so we need to set up the new speed here.
1733 */
1734 if (bmsr & BMSR_LSTATUS) {
1735 bp->line_speed = bp->req_line_speed;
1736 bp->duplex = bp->req_duplex;
1737 bnx2_resolve_flow_ctrl(bp);
1738 bnx2_set_mac_link(bp);
1739 }
27a005b8
MC
1740 } else {
1741 bnx2_resolve_flow_ctrl(bp);
1742 bnx2_set_mac_link(bp);
b6016b76
MC
1743 }
1744 return 0;
1745}
1746
1747static int
0d8a6571 1748bnx2_setup_phy(struct bnx2 *bp, u8 port)
b6016b76
MC
1749{
1750 if (bp->loopback == MAC_LOOPBACK)
1751 return 0;
1752
1753 if (bp->phy_flags & PHY_SERDES_FLAG) {
0d8a6571 1754 return (bnx2_setup_serdes_phy(bp, port));
b6016b76
MC
1755 }
1756 else {
1757 return (bnx2_setup_copper_phy(bp));
1758 }
1759}
1760
27a005b8
MC
/* One-time initialization of the 5709 SerDes PHY.  The 5709 exposes
 * the IEEE registers at offset +0x10 within its banked register map,
 * so the bp->mii_* register addresses are remapped first; the PHY is
 * then reset and its SERDES_DIG / OVER1G / BAM / CL73 blocks are
 * programmed for fiber mode, optional 2.5G capability and
 * next-page/CL73 autoneg.  Always returns 0.
 */
1761static int
1762bnx2_init_5709s_phy(struct bnx2 *bp)
1763{
1764 u32 val;
1765
1766 bp->mii_bmcr = MII_BMCR + 0x10;
1767 bp->mii_bmsr = MII_BMSR + 0x10;
1768 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1769 bp->mii_adv = MII_ADVERTISE + 0x10;
1770 bp->mii_lpa = MII_LPA + 0x10;
1771 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1772
1773 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1774 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1775
1776 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1777 bnx2_reset_phy(bp);
1778
1779 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1780
/* Fixed fiber mode: disable media auto-detection. */
1781 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1782 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1783 val |= MII_BNX2_SD_1000XCTL1_FIBER;
1784 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
1785
1786 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1787 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
1788 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
1789 val |= BCM5708S_UP1_2G5;
1790 else
1791 val &= ~BCM5708S_UP1_2G5;
1792 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
1793
1794 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
1795 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
1796 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
1797 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
1798
1799 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
1800
1801 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
1802 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
1803 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
1804
/* Leave the PHY pointing at the default register block. */
1805 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1806
1807 return 0;
1808}
1809
b6016b76 /* One-time initialization of the 5708 SerDes PHY: reset, fiber mode
 * with auto-detect, parallel-detect enable, optional 2.5G capability,
 * plus NVRAM-driven TX amplitude tweaks for early chip revs and
 * backplane configurations.  Always returns 0.
 */
2046static int
5b0c76ad
MC
1811bnx2_init_5708s_phy(struct bnx2 *bp)
1812{
1813 u32 val;
1814
27a005b8
MC
1815 bnx2_reset_phy(bp);
1816
1817 bp->mii_up1 = BCM5708S_UP1;
1818
5b0c76ad
MC
1819 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1820 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1821 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1822
1823 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1824 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1825 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1826
1827 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1828 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1829 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1830
1831 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1832 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1833 val |= BCM5708S_UP1_2G5;
1834 bnx2_write_phy(bp, BCM5708S_UP1, val);
1835 }
1836
1837 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
dda1e390
MC
1838 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1839 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
5b0c76ad
MC
1840 /* increase tx signal amplitude */
1841 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1842 BCM5708S_BLK_ADDR_TX_MISC);
1843 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1844 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1845 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1846 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1847 }
1848
/* If NVRAM provides a TXCTL3 value, apply it on backplane designs. */
e3648b3d 1849 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
5b0c76ad
MC
1850 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1851
1852 if (val) {
1853 u32 is_backplane;
1854
e3648b3d 1855 is_backplane = REG_RD_IND(bp, bp->shmem_base +
5b0c76ad
MC
1856 BNX2_SHARED_HW_CFG_CONFIG);
1857 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1858 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1859 BCM5708S_BLK_ADDR_TX_MISC);
1860 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1861 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1862 BCM5708S_BLK_ADDR_DIG);
1863 }
1864 }
1865 return 0;
1866}
1867
/* One-time initialization of the 5706 SerDes PHY: reset, clear the
 * parallel-detect flag, and program MTU-dependent PHY registers
 * (extended packet length for jumbo frames).  The 0x18/0x1c registers
 * are vendor shadow registers — exact bit meanings come from Broadcom
 * documentation.  Always returns 0.
 */
1868static int
1869bnx2_init_5706s_phy(struct bnx2 *bp)
b6016b76 1870{
27a005b8
MC
1871 bnx2_reset_phy(bp);
1872
b6016b76
MC
1873 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1874
59b47d8a
MC
1875 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1876 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
b6016b76
MC
1877
1878 if (bp->dev->mtu > 1500) {
1879 u32 val;
1880
1881 /* Set extended packet length bit */
1882 bnx2_write_phy(bp, 0x18, 0x7);
1883 bnx2_read_phy(bp, 0x18, &val);
1884 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1885
1886 bnx2_write_phy(bp, 0x1c, 0x6c00);
1887 bnx2_read_phy(bp, 0x1c, &val);
1888 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1889 }
1890 else {
1891 u32 val;
1892
1893 bnx2_write_phy(bp, 0x18, 0x7);
1894 bnx2_read_phy(bp, 0x18, &val);
1895 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1896
1897 bnx2_write_phy(bp, 0x1c, 0x6c00);
1898 bnx2_read_phy(bp, 0x1c, &val);
1899 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1900 }
1901
1902 return 0;
1903}
1904
/* One-time initialization of a copper PHY: reset, apply the CRC-fix
 * and early-DAC workarounds when the corresponding phy_flags are set,
 * program MTU-dependent extended-packet-length bits, and enable
 * ethernet@wirespeed (downshift).  The 0x10/0x15/0x17/0x18/0x1c
 * registers are vendor shadow/expansion registers.  Always returns 0.
 */
1905static int
1906bnx2_init_copper_phy(struct bnx2 *bp)
1907{
5b0c76ad
MC
1908 u32 val;
1909
27a005b8
MC
1910 bnx2_reset_phy(bp);
1911
b6016b76
MC
/* CRC workaround: magic DSP expansion-register sequence. */
1912 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1913 bnx2_write_phy(bp, 0x18, 0x0c00);
1914 bnx2_write_phy(bp, 0x17, 0x000a);
1915 bnx2_write_phy(bp, 0x15, 0x310b);
1916 bnx2_write_phy(bp, 0x17, 0x201f);
1917 bnx2_write_phy(bp, 0x15, 0x9506);
1918 bnx2_write_phy(bp, 0x17, 0x401f);
1919 bnx2_write_phy(bp, 0x15, 0x14e2);
1920 bnx2_write_phy(bp, 0x18, 0x0400);
1921 }
1922
b659f44e
MC
1923 if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
1924 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
1925 MII_BNX2_DSP_EXPAND_REG | 0x8);
1926 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1927 val &= ~(1 << 8);
1928 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
1929 }
1930
b6016b76 1931 if (bp->dev->mtu > 1500) {
b6016b76
MC
1932 /* Set extended packet length bit */
1933 bnx2_write_phy(bp, 0x18, 0x7);
1934 bnx2_read_phy(bp, 0x18, &val);
1935 bnx2_write_phy(bp, 0x18, val | 0x4000);
1936
1937 bnx2_read_phy(bp, 0x10, &val);
1938 bnx2_write_phy(bp, 0x10, val | 0x1);
1939 }
1940 else {
b6016b76
MC
1941 bnx2_write_phy(bp, 0x18, 0x7);
1942 bnx2_read_phy(bp, 0x18, &val);
1943 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1944
1945 bnx2_read_phy(bp, 0x10, &val);
1946 bnx2_write_phy(bp, 0x10, val & ~0x1);
1947 }
1948
5b0c76ad
MC
1949 /* ethernet@wirespeed */
1950 bnx2_write_phy(bp, 0x18, 0x7007);
1951 bnx2_read_phy(bp, 0x18, &val);
1952 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
b6016b76
MC
1953 return 0;
1954}
1955
1956
/* Full PHY bring-up: set the default MII register map, enable link
 * attention, read the PHY ID, run the chip/media-specific init
 * routine, and finish with bnx2_setup_phy().  Remote-PHY devices skip
 * the local PHY probing entirely.
 *
 * Returns 0 on success or the first error from an init/setup step.
 */
1957static int
1958bnx2_init_phy(struct bnx2 *bp)
1959{
1960 u32 val;
1961 int rc = 0;
1962
1963 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1964 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1965
/* Default IEEE register addresses; the 5709 SerDes init overrides
 * these with its remapped offsets. */
ca58c3af
MC
1966 bp->mii_bmcr = MII_BMCR;
1967 bp->mii_bmsr = MII_BMSR;
27a005b8 1968 bp->mii_bmsr1 = MII_BMSR;
ca58c3af
MC
1969 bp->mii_adv = MII_ADVERTISE;
1970 bp->mii_lpa = MII_LPA;
1971
b6016b76
MC
1972 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1973
0d8a6571
MC
1974 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1975 goto setup_phy;
1976
b6016b76
MC
1977 bnx2_read_phy(bp, MII_PHYSID1, &val);
1978 bp->phy_id = val << 16;
1979 bnx2_read_phy(bp, MII_PHYSID2, &val);
1980 bp->phy_id |= val & 0xffff;
1981
1982 if (bp->phy_flags & PHY_SERDES_FLAG) {
5b0c76ad
MC
1983 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1984 rc = bnx2_init_5706s_phy(bp);
1985 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1986 rc = bnx2_init_5708s_phy(bp);
27a005b8
MC
1987 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1988 rc = bnx2_init_5709s_phy(bp);
b6016b76
MC
1989 }
1990 else {
1991 rc = bnx2_init_copper_phy(bp);
1992 }
1993
0d8a6571
MC
1994setup_phy:
1995 if (!rc)
1996 rc = bnx2_setup_phy(bp, bp->phy_port);
b6016b76
MC
1997
1998 return rc;
1999}
2000
2001static int
2002bnx2_set_mac_loopback(struct bnx2 *bp)
2003{
2004 u32 mac_mode;
2005
2006 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2007 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2008 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2009 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2010 bp->link_up = 1;
2011 return 0;
2012}
2013
bc5a0690
MC
2014static int bnx2_test_link(struct bnx2 *);
2015
/* Put the PHY into loopback (1000 Mb/s full duplex) for diagnostics,
 * wait up to ~1 s for the link to come up, then configure the EMAC
 * for GMII with all loopback/force bits cleared and mark the link up.
 *
 * Returns 0 on success, or the bnx2_write_phy() error.  Note the link
 * wait is best-effort: a timeout is not treated as an error.
 */
2016static int
2017bnx2_set_phy_loopback(struct bnx2 *bp)
2018{
2019 u32 mac_mode;
2020 int rc, i;
2021
2022 spin_lock_bh(&bp->phy_lock);
ca58c3af 2023 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
bc5a0690
MC
2024 BMCR_SPEED1000);
2025 spin_unlock_bh(&bp->phy_lock);
2026 if (rc)
2027 return rc;
2028
2029 for (i = 0; i < 10; i++) {
2030 if (bnx2_test_link(bp) == 0)
2031 break;
80be4434 2032 msleep(100);
bc5a0690
MC
2033 }
2034
2035 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2036 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2037 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
59b47d8a 2038 BNX2_EMAC_MODE_25G_MODE);
bc5a0690
MC
2039
2040 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2041 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2042 bp->link_up = 1;
2043 return 0;
2044}
2045
b6016b76 /* Synchronous driver->firmware command handshake.  Tags msg_data with
 * the next sequence number, writes it to the DRV_MB mailbox, and polls
 * the FW_MB ack (10 ms steps, FW_ACK_TIME_OUT_MS total).  Sleeps, so
 * callers must not hold spinlocks.
 *
 * @silent: suppress the timeout printk.
 * Returns 0 on success (or for WAIT0 messages, which are fire-and-
 * forget), -EBUSY if the firmware never acks (the timeout is also
 * reported back to the firmware), -EIO if the ack status is not OK.
 */
2046static int
b090ae2b 2047bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
b6016b76
MC
2048{
2049 int i;
2050 u32 val;
2051
b6016b76
MC
2052 bp->fw_wr_seq++;
2053 msg_data |= bp->fw_wr_seq;
2054
e3648b3d 2055 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
b6016b76
MC
2056
2057 /* wait for an acknowledgement. */
b090ae2b
MC
2058 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
2059 msleep(10);
b6016b76 2060
e3648b3d 2061 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
b6016b76
MC
2062
2063 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2064 break;
2065 }
b090ae2b
MC
2066 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2067 return 0;
b6016b76
MC
2068
2069 /* If we timed out, inform the firmware that this is the case. */
b090ae2b
MC
2070 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2071 if (!silent)
2072 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2073 "%x\n", msg_data);
b6016b76
MC
2074
2075 msg_data &= ~BNX2_DRV_MSG_CODE;
2076 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2077
e3648b3d 2078 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
b6016b76 2079
b6016b76
MC
2080 return -EBUSY;
2081 }
2082
b090ae2b
MC
2083 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2084 return -EIO;
2085
b6016b76
MC
2086 return 0;
2087}
2088
59b47d8a
MC
/* Initialize the 5709 context memory: kick the hardware MEM_INIT,
 * wait for it to finish, then program the host page table with the
 * DMA address of every context block and wait for each write request
 * to be consumed.
 *
 * Returns 0 on success, -EBUSY if MEM_INIT or a page-table write
 * never completes.
 */
2089static int
2090bnx2_init_5709_context(struct bnx2 *bp)
2091{
2092 int i, ret = 0;
2093 u32 val;
2094
2095 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2096 val |= (BCM_PAGE_BITS - 8) << 16;
2097 REG_WR(bp, BNX2_CTX_COMMAND, val);
641bdcd5
MC
2098 for (i = 0; i < 10; i++) {
2099 val = REG_RD(bp, BNX2_CTX_COMMAND);
2100 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2101 break;
2102 udelay(2);
2103 }
2104 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2105 return -EBUSY;
2106
59b47d8a
MC
2107 for (i = 0; i < bp->ctx_pages; i++) {
2108 int j;
2109
/* Low/high halves of the 64-bit DMA address, then the write
 * request for page table entry i. */
2110 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2111 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2112 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2113 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2114 (u64) bp->ctx_blk_mapping[i] >> 32);
2115 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2116 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2117 for (j = 0; j < 10; j++) {
2118
2119 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2120 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2121 break;
2122 udelay(5);
2123 }
2124 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2125 ret = -EBUSY;
2126 break;
2127 }
2128 }
2129 return ret;
2130}
2131
b6016b76
MC
/* Zero-initialize the on-chip context memory for all 96 connection IDs
 * on non-5709 chips.
 *
 * For each context ID the virtual and physical context addresses are
 * programmed and the whole context is cleared word by word.  5706 A0
 * silicon needs a remapped physical CID for the upper half of each
 * group of 16 (hardware erratum workaround).
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			/* A0 workaround: remap CIDs with bit 3 set into the
			 * 0x60-based range, preserving the low 3 bits and the
			 * high nibble.
			 */
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* A context may span several physical context pages. */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				CTX_WR(bp, vcid_addr, offset, 0);
		}
	}
}
2174
/* Work around bad on-chip RX buffer memory blocks.
 *
 * Drains the hardware RX mbuf allocator: every free cluster is
 * allocated, clusters whose returned value has bit 9 set (bad memory)
 * are kept out, and only the good ones are released back to the pool.
 * The bad clusters are thus permanently removed from circulation.
 *
 * Returns 0 on success, -ENOMEM if the temporary tracking array cannot
 * be allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	/* Track up to 512 good cluster handles while the pool is drained. */
	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Encode the cluster handle into the format the FREE register
		 * expects (handle replicated in two fields plus a valid bit).
		 */
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2225
2226static void
6aa20a22 2227bnx2_set_mac_addr(struct bnx2 *bp)
b6016b76
MC
2228{
2229 u32 val;
2230 u8 *mac_addr = bp->dev->dev_addr;
2231
2232 val = (mac_addr[0] << 8) | mac_addr[1];
2233
2234 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2235
6aa20a22 2236 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
b6016b76
MC
2237 (mac_addr[4] << 8) | mac_addr[5];
2238
2239 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2240}
2241
47bf4246
MC
/* Allocate one page for the RX page ring slot @index, DMA-map it, and
 * fill in the corresponding rx_bd with the mapping address.
 *
 * Returns 0 on success, -ENOMEM if no page could be allocated.
 * NOTE(review): the pci_map_page() result is not checked for mapping
 * errors — presumably acceptable on the platforms this targets; verify.
 */
static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
{
	dma_addr_t mapping;
	struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
	struct rx_bd *rxbd =
		&bp->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return -ENOMEM;
	mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	rx_pg->page = page;
	pci_unmap_addr_set(rx_pg, mapping, mapping);
	/* Descriptor carries the DMA address split into hi/lo 32-bit words. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return 0;
}
2261
2262static void
2263bnx2_free_rx_page(struct bnx2 *bp, u16 index)
2264{
2265 struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2266 struct page *page = rx_pg->page;
2267
2268 if (!page)
2269 return;
2270
2271 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2272 PCI_DMA_FROMDEVICE);
2273
2274 __free_page(page);
2275 rx_pg->page = NULL;
2276}
2277
b6016b76
MC
/* Allocate and DMA-map a new receive skb for RX ring slot @index,
 * aligning skb->data to BNX2_RX_ALIGN, and publish its address in the
 * corresponding rx_bd.  Also advances bp->rx_prod_bseq by the buffer
 * size, as the hardware tracks producer progress in bytes.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Round skb->data up to the required receive alignment. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	/* Hardware producer byte sequence advances by one full buffer. */
	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2308
da3e4fbe 2309static int
35efa7c1 2310bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
b6016b76 2311{
35efa7c1 2312 struct status_block *sblk = bnapi->status_blk;
b6016b76 2313 u32 new_link_state, old_link_state;
da3e4fbe 2314 int is_set = 1;
b6016b76 2315
da3e4fbe
MC
2316 new_link_state = sblk->status_attn_bits & event;
2317 old_link_state = sblk->status_attn_bits_ack & event;
b6016b76 2318 if (new_link_state != old_link_state) {
da3e4fbe
MC
2319 if (new_link_state)
2320 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2321 else
2322 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2323 } else
2324 is_set = 0;
2325
2326 return is_set;
2327}
2328
/* Handle PHY-related attention events from the status block:
 * a link-state change (handled under the PHY lock) and a timer-abort
 * event, which triggers remote link re-evaluation.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE)) {
		/* bnx2_set_link() touches PHY state; serialize with other
		 * PHY users.
		 */
		spin_lock(&bp->phy_lock);
		bnx2_set_link(bp);
		spin_unlock(&bp->phy_lock);
	}
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

}
2341
ead7270b 2342static inline u16
35efa7c1 2343bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
ead7270b
MC
2344{
2345 u16 cons;
2346
35efa7c1 2347 cons = bnapi->status_blk->status_tx_quick_consumer_index0;
ead7270b
MC
2348
2349 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2350 cons++;
2351 return cons;
2352}
2353
b6016b76 2354static void
35efa7c1 2355bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
b6016b76
MC
2356{
2357 u16 hw_cons, sw_cons, sw_ring_cons;
2358 int tx_free_bd = 0;
2359
35efa7c1 2360 hw_cons = bnx2_get_hw_tx_cons(bnapi);
a550c99b 2361 sw_cons = bnapi->tx_cons;
b6016b76
MC
2362
2363 while (sw_cons != hw_cons) {
2364 struct sw_bd *tx_buf;
2365 struct sk_buff *skb;
2366 int i, last;
2367
2368 sw_ring_cons = TX_RING_IDX(sw_cons);
2369
2370 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
2371 skb = tx_buf->skb;
1d39ed56 2372
b6016b76 2373 /* partial BD completions possible with TSO packets */
89114afd 2374 if (skb_is_gso(skb)) {
b6016b76
MC
2375 u16 last_idx, last_ring_idx;
2376
2377 last_idx = sw_cons +
2378 skb_shinfo(skb)->nr_frags + 1;
2379 last_ring_idx = sw_ring_cons +
2380 skb_shinfo(skb)->nr_frags + 1;
2381 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2382 last_idx++;
2383 }
2384 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2385 break;
2386 }
2387 }
1d39ed56 2388
b6016b76
MC
2389 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2390 skb_headlen(skb), PCI_DMA_TODEVICE);
2391
2392 tx_buf->skb = NULL;
2393 last = skb_shinfo(skb)->nr_frags;
2394
2395 for (i = 0; i < last; i++) {
2396 sw_cons = NEXT_TX_BD(sw_cons);
2397
2398 pci_unmap_page(bp->pdev,
2399 pci_unmap_addr(
2400 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2401 mapping),
2402 skb_shinfo(skb)->frags[i].size,
2403 PCI_DMA_TODEVICE);
2404 }
2405
2406 sw_cons = NEXT_TX_BD(sw_cons);
2407
2408 tx_free_bd += last + 1;
2409
745720e5 2410 dev_kfree_skb(skb);
b6016b76 2411
35efa7c1 2412 hw_cons = bnx2_get_hw_tx_cons(bnapi);
b6016b76
MC
2413 }
2414
a550c99b
MC
2415 bnapi->hw_tx_cons = hw_cons;
2416 bnapi->tx_cons = sw_cons;
2f8af120
MC
2417 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2418 * before checking for netif_queue_stopped(). Without the
2419 * memory barrier, there is a small possibility that bnx2_start_xmit()
2420 * will miss it and cause the queue to be stopped forever.
2421 */
2422 smp_mb();
b6016b76 2423
2f8af120 2424 if (unlikely(netif_queue_stopped(bp->dev)) &&
a550c99b 2425 (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)) {
2f8af120 2426 netif_tx_lock(bp->dev);
b6016b76 2427 if ((netif_queue_stopped(bp->dev)) &&
a550c99b 2428 (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh))
b6016b76 2429 netif_wake_queue(bp->dev);
2f8af120 2430 netif_tx_unlock(bp->dev);
b6016b76 2431 }
b6016b76
MC
2432}
2433
1db82f2a
MC
/* Recycle @count page-ring entries from the consumer back to the
 * producer side of the RX page ring without allocating new pages.
 *
 * If @skb is non-NULL, its last fragment page is detached, re-mapped
 * for DMA, and placed into the first consumer slot before the skb is
 * freed (used when skb assembly failed partway).
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	dma_addr_t mapping;
	int i;
	u16 hw_prod = bp->rx_pg_prod, prod;
	u16 cons = bp->rx_pg_cons;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &bp->rx_pg_ring[prod];
		cons_rx_pg = &bp->rx_pg_ring[cons];
		cons_bd = &bp->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &bp->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		if (i == 0 && skb) {
			struct page *page;
			struct skb_shared_info *shinfo;

			/* Steal the skb's last fragment page and give it back
			 * to the consumer slot.
			 */
			shinfo = skb_shinfo(skb);
			shinfo->nr_frags--;
			page = shinfo->frags[shinfo->nr_frags].page;
			shinfo->frags[shinfo->nr_frags].page = NULL;
			mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
					       PCI_DMA_FROMDEVICE);
			cons_rx_pg->page = page;
			pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
			dev_kfree_skb(skb);
		}
		if (prod != cons) {
			/* Move the page and its mapping from the consumer
			 * slot to the producer slot.
			 */
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			pci_unmap_addr_set(prod_rx_pg, mapping,
				pci_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	bp->rx_pg_prod = hw_prod;
	bp->rx_pg_cons = cons;
}
2482
b6016b76
MC
/* Recycle an RX buffer from consumer slot @cons to producer slot @prod
 * without re-allocating: the DMA buffer is re-synced for the device,
 * the skb and its mapping are moved to the producer slot, and the
 * producer descriptor inherits the consumer's DMA address.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Hand the (partially CPU-synced) header region back to the device. */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: nothing more to move. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2512
85833c62
MC
/* Finish receiving a packet into @skb.
 *
 * First replenishes the RX ring slot; on failure the skb (and any page
 * fragments) are recycled and the error returned.  For non-split
 * packets (@hdr_len == 0) the linear data is simply extended to @len.
 * For split/jumbo packets, @hdr_len bytes stay linear and the rest of
 * the frame is assembled from RX page-ring pages as skb fragments,
 * replenishing each page slot as it is consumed.
 *
 * @len is the frame length excluding the 4-byte trailing CRC;
 * the page math below works with len + 4 (the raw on-wire length).
 * Returns 0 on success or a negative errno on allocation failure.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct sk_buff *skb, unsigned int len,
	    unsigned int hdr_len, dma_addr_t dma_addr, u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;	/* low half: producer index */

	err = bnx2_alloc_rx_skb(bp, prod);
	if (unlikely(err)) {
		/* high half of ring_idx: consumer index */
		bnx2_reuse_rx_skb(bp, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, bp->rx_offset);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = bp->rx_pg_cons;
		u16 pg_prod = bp->rx_pg_prod;

		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* Remaining bytes are all (or partly) CRC:
				 * trim the tail instead of adding a fragment
				 * and recycle the unused pages.
				 */
				unsigned int tail = 4 - frag_len;

				bp->rx_pg_cons = pg_cons;
				bp->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, NULL, pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &bp->rx_pg_ring[pg_cons];

			pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			/* Last fragment: drop the trailing 4-byte CRC. */
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				bp->rx_pg_cons = pg_cons;
				bp->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, skb, pages - i);
				return err;
			}

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		bp->rx_pg_prod = pg_prod;
		bp->rx_pg_cons = pg_cons;
	}
	return 0;
}
2601
c09c2627 2602static inline u16
35efa7c1 2603bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
c09c2627 2604{
35efa7c1 2605 u16 cons = bnapi->status_blk->status_rx_quick_consumer_index0;
c09c2627
MC
2606
2607 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2608 cons++;
2609 return cons;
2610}
2611
b6016b76 2612static int
35efa7c1 2613bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
b6016b76
MC
2614{
2615 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2616 struct l2_fhdr *rx_hdr;
1db82f2a 2617 int rx_pkt = 0, pg_ring_used = 0;
b6016b76 2618
35efa7c1 2619 hw_cons = bnx2_get_hw_rx_cons(bnapi);
b6016b76
MC
2620 sw_cons = bp->rx_cons;
2621 sw_prod = bp->rx_prod;
2622
2623 /* Memory barrier necessary as speculative reads of the rx
2624 * buffer can be ahead of the index in the status block
2625 */
2626 rmb();
2627 while (sw_cons != hw_cons) {
1db82f2a 2628 unsigned int len, hdr_len;
ade2bfe7 2629 u32 status;
b6016b76
MC
2630 struct sw_bd *rx_buf;
2631 struct sk_buff *skb;
236b6394 2632 dma_addr_t dma_addr;
b6016b76
MC
2633
2634 sw_ring_cons = RX_RING_IDX(sw_cons);
2635 sw_ring_prod = RX_RING_IDX(sw_prod);
2636
2637 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2638 skb = rx_buf->skb;
236b6394
MC
2639
2640 rx_buf->skb = NULL;
2641
2642 dma_addr = pci_unmap_addr(rx_buf, mapping);
2643
2644 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
b6016b76
MC
2645 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2646
2647 rx_hdr = (struct l2_fhdr *) skb->data;
1db82f2a 2648 len = rx_hdr->l2_fhdr_pkt_len;
b6016b76 2649
ade2bfe7 2650 if ((status = rx_hdr->l2_fhdr_status) &
b6016b76
MC
2651 (L2_FHDR_ERRORS_BAD_CRC |
2652 L2_FHDR_ERRORS_PHY_DECODE |
2653 L2_FHDR_ERRORS_ALIGNMENT |
2654 L2_FHDR_ERRORS_TOO_SHORT |
2655 L2_FHDR_ERRORS_GIANT_FRAME)) {
2656
85833c62
MC
2657 bnx2_reuse_rx_skb(bp, skb, sw_ring_cons, sw_ring_prod);
2658 goto next_rx;
b6016b76 2659 }
1db82f2a
MC
2660 hdr_len = 0;
2661 if (status & L2_FHDR_STATUS_SPLIT) {
2662 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
2663 pg_ring_used = 1;
2664 } else if (len > bp->rx_jumbo_thresh) {
2665 hdr_len = bp->rx_jumbo_thresh;
2666 pg_ring_used = 1;
2667 }
2668
2669 len -= 4;
b6016b76 2670
5d5d0015 2671 if (len <= bp->rx_copy_thresh) {
b6016b76
MC
2672 struct sk_buff *new_skb;
2673
932f3772 2674 new_skb = netdev_alloc_skb(bp->dev, len + 2);
85833c62
MC
2675 if (new_skb == NULL) {
2676 bnx2_reuse_rx_skb(bp, skb, sw_ring_cons,
2677 sw_ring_prod);
2678 goto next_rx;
2679 }
b6016b76
MC
2680
2681 /* aligned copy */
d626f62b
ACM
2682 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2683 new_skb->data, len + 2);
b6016b76
MC
2684 skb_reserve(new_skb, 2);
2685 skb_put(new_skb, len);
b6016b76
MC
2686
2687 bnx2_reuse_rx_skb(bp, skb,
2688 sw_ring_cons, sw_ring_prod);
2689
2690 skb = new_skb;
1db82f2a 2691 } else if (unlikely(bnx2_rx_skb(bp, skb, len, hdr_len, dma_addr,
85833c62 2692 (sw_ring_cons << 16) | sw_ring_prod)))
b6016b76 2693 goto next_rx;
b6016b76
MC
2694
2695 skb->protocol = eth_type_trans(skb, bp->dev);
2696
2697 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
d1e100ba 2698 (ntohs(skb->protocol) != 0x8100)) {
b6016b76 2699
745720e5 2700 dev_kfree_skb(skb);
b6016b76
MC
2701 goto next_rx;
2702
2703 }
2704
b6016b76
MC
2705 skb->ip_summed = CHECKSUM_NONE;
2706 if (bp->rx_csum &&
2707 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2708 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2709
ade2bfe7
MC
2710 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2711 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
b6016b76
MC
2712 skb->ip_summed = CHECKSUM_UNNECESSARY;
2713 }
2714
2715#ifdef BCM_VLAN
2716 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
2717 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2718 rx_hdr->l2_fhdr_vlan_tag);
2719 }
2720 else
2721#endif
2722 netif_receive_skb(skb);
2723
2724 bp->dev->last_rx = jiffies;
2725 rx_pkt++;
2726
2727next_rx:
b6016b76
MC
2728 sw_cons = NEXT_RX_BD(sw_cons);
2729 sw_prod = NEXT_RX_BD(sw_prod);
2730
2731 if ((rx_pkt == budget))
2732 break;
f4e418f7
MC
2733
2734 /* Refresh hw_cons to see if there is new work */
2735 if (sw_cons == hw_cons) {
35efa7c1 2736 hw_cons = bnx2_get_hw_rx_cons(bnapi);
f4e418f7
MC
2737 rmb();
2738 }
b6016b76
MC
2739 }
2740 bp->rx_cons = sw_cons;
2741 bp->rx_prod = sw_prod;
2742
1db82f2a
MC
2743 if (pg_ring_used)
2744 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
2745 bp->rx_pg_prod);
2746
b6016b76
MC
2747 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2748
2749 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
2750
2751 mmiowb();
2752
2753 return rx_pkt;
2754
2755}
2756
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi;

	prefetch(bnapi->status_blk);
	/* Mask further interrupts until NAPI polling completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev, &bnapi->napi);

	return IRQ_HANDLED;
}
2780
8e6a72c4
MC
/* One-shot MSI ISR: unlike bnx2_msi(), no register write is done here
 * to mask the interrupt — presumably the one-shot mode handles that in
 * hardware (NOTE(review): confirm against the one-shot MSI setup code).
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi;

	prefetch(bnapi->status_blk);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev, &bnapi->napi);

	return IRQ_HANDLED;
}
2798
b6016b76 2799static irqreturn_t
7d12e780 2800bnx2_interrupt(int irq, void *dev_instance)
b6016b76
MC
2801{
2802 struct net_device *dev = dev_instance;
972ec0d4 2803 struct bnx2 *bp = netdev_priv(dev);
35efa7c1
MC
2804 struct bnx2_napi *bnapi = &bp->bnx2_napi;
2805 struct status_block *sblk = bnapi->status_blk;
b6016b76
MC
2806
2807 /* When using INTx, it is possible for the interrupt to arrive
2808 * at the CPU before the status block posted prior to the
2809 * interrupt. Reading a register will flush the status block.
2810 * When using MSI, the MSI message will always complete after
2811 * the status block write.
2812 */
35efa7c1 2813 if ((sblk->status_idx == bnapi->last_status_idx) &&
b6016b76
MC
2814 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2815 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
73eef4cd 2816 return IRQ_NONE;
b6016b76
MC
2817
2818 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2819 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2820 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2821
b8a7ce7b
MC
2822 /* Read back to deassert IRQ immediately to avoid too many
2823 * spurious interrupts.
2824 */
2825 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
2826
b6016b76 2827 /* Return here if interrupt is shared and is disabled. */
73eef4cd
MC
2828 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2829 return IRQ_HANDLED;
b6016b76 2830
35efa7c1
MC
2831 if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
2832 bnapi->last_status_idx = sblk->status_idx;
2833 __netif_rx_schedule(dev, &bnapi->napi);
b8a7ce7b 2834 }
b6016b76 2835
73eef4cd 2836 return IRQ_HANDLED;
b6016b76
MC
2837}
2838
0d8a6571
MC
2839#define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2840 STATUS_ATTN_BITS_TIMER_ABORT)
da3e4fbe 2841
f4e418f7 2842static inline int
35efa7c1 2843bnx2_has_work(struct bnx2_napi *bnapi)
f4e418f7 2844{
35efa7c1 2845 struct bnx2 *bp = bnapi->bp;
f4e418f7
MC
2846 struct status_block *sblk = bp->status_blk;
2847
35efa7c1 2848 if ((bnx2_get_hw_rx_cons(bnapi) != bp->rx_cons) ||
a550c99b 2849 (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons))
f4e418f7
MC
2850 return 1;
2851
da3e4fbe
MC
2852 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2853 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
f4e418f7
MC
2854 return 1;
2855
2856 return 0;
2857}
2858
35efa7c1
MC
2859static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
2860 int work_done, int budget)
b6016b76 2861{
35efa7c1 2862 struct status_block *sblk = bnapi->status_blk;
da3e4fbe
MC
2863 u32 status_attn_bits = sblk->status_attn_bits;
2864 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
b6016b76 2865
da3e4fbe
MC
2866 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
2867 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
b6016b76 2868
35efa7c1 2869 bnx2_phy_int(bp, bnapi);
bf5295bb
MC
2870
2871 /* This is needed to take care of transient status
2872 * during link changes.
2873 */
2874 REG_WR(bp, BNX2_HC_COMMAND,
2875 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2876 REG_RD(bp, BNX2_HC_COMMAND);
b6016b76
MC
2877 }
2878
a550c99b 2879 if (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons)
35efa7c1 2880 bnx2_tx_int(bp, bnapi);
b6016b76 2881
35efa7c1
MC
2882 if (bnx2_get_hw_rx_cons(bnapi) != bp->rx_cons)
2883 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
6aa20a22 2884
6f535763
DM
2885 return work_done;
2886}
2887
/* NAPI poll callback.  Loops over bnx2_poll_work() until either the
 * budget is exhausted or no work remains; in the latter case completes
 * NAPI and re-enables interrupts via the INT_ACK_CMD register, telling
 * the hardware the last status index processed.
 *
 * Returns the number of RX packets processed.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

		if (unlikely(work_done >= budget))
			break;

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;
		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			netif_rx_complete(bp->dev, napi);
			if (likely(bp->flags & USING_MSI_FLAG)) {
				/* MSI: a single write re-enables interrupts. */
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: update the index with interrupts still
			 * masked, then unmask with a second write.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
2929
932ff279 2930/* Called with rtnl_lock from vlan functions and also netif_tx_lock
b6016b76
MC
2931 * from set_multicast.
2932 */
2933static void
2934bnx2_set_rx_mode(struct net_device *dev)
2935{
972ec0d4 2936 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
2937 u32 rx_mode, sort_mode;
2938 int i;
b6016b76 2939
c770a65c 2940 spin_lock_bh(&bp->phy_lock);
b6016b76
MC
2941
2942 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2943 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2944 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2945#ifdef BCM_VLAN
e29054f9 2946 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
b6016b76 2947 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
b6016b76 2948#else
e29054f9
MC
2949 if (!(bp->flags & ASF_ENABLE_FLAG))
2950 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
b6016b76
MC
2951#endif
2952 if (dev->flags & IFF_PROMISC) {
2953 /* Promiscuous mode. */
2954 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
7510873d
MC
2955 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2956 BNX2_RPM_SORT_USER0_PROM_VLAN;
b6016b76
MC
2957 }
2958 else if (dev->flags & IFF_ALLMULTI) {
2959 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2960 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2961 0xffffffff);
2962 }
2963 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2964 }
2965 else {
2966 /* Accept one or more multicast(s). */
2967 struct dev_mc_list *mclist;
2968 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2969 u32 regidx;
2970 u32 bit;
2971 u32 crc;
2972
2973 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2974
2975 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2976 i++, mclist = mclist->next) {
2977
2978 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2979 bit = crc & 0xff;
2980 regidx = (bit & 0xe0) >> 5;
2981 bit &= 0x1f;
2982 mc_filter[regidx] |= (1 << bit);
2983 }
2984
2985 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2986 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2987 mc_filter[i]);
2988 }
2989
2990 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2991 }
2992
2993 if (rx_mode != bp->rx_mode) {
2994 bp->rx_mode = rx_mode;
2995 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2996 }
2997
2998 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2999 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3000 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3001
c770a65c 3002 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
3003}
3004
/* Load an RV2P processor firmware image.
 *
 * Writes the instruction stream (64 bits per instruction, as high/low
 * 32-bit halves) into the selected RV2P processor (@rv2p_proc is
 * RV2P_PROC1 or RV2P_PROC2) and then resets that processor; the
 * un-stall happens later in the init sequence.
 */
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;


	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
		rv2p_code++;

		/* Commit the instruction word at index i/8. */
		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
3037
af3ee519 3038static int
b6016b76
MC
3039load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
3040{
3041 u32 offset;
3042 u32 val;
af3ee519 3043 int rc;
b6016b76
MC
3044
3045 /* Halt the CPU. */
3046 val = REG_RD_IND(bp, cpu_reg->mode);
3047 val |= cpu_reg->mode_value_halt;
3048 REG_WR_IND(bp, cpu_reg->mode, val);
3049 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
3050
3051 /* Load the Text area. */
3052 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
af3ee519 3053 if (fw->gz_text) {
b6016b76
MC
3054 int j;
3055
ea1f8d5c
MC
3056 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
3057 fw->gz_text_len);
3058 if (rc < 0)
b3448b0b 3059 return rc;
ea1f8d5c 3060
b6016b76 3061 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
ea1f8d5c 3062 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
b6016b76
MC
3063 }
3064 }
3065
3066 /* Load the Data area. */
3067 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3068 if (fw->data) {
3069 int j;
3070
3071 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3072 REG_WR_IND(bp, offset, fw->data[j]);
3073 }
3074 }
3075
3076 /* Load the SBSS area. */
3077 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
ea1f8d5c 3078 if (fw->sbss_len) {
b6016b76
MC
3079 int j;
3080
3081 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
ea1f8d5c 3082 REG_WR_IND(bp, offset, 0);
b6016b76
MC
3083 }
3084 }
3085
3086 /* Load the BSS area. */
3087 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
ea1f8d5c 3088 if (fw->bss_len) {
b6016b76
MC
3089 int j;
3090
3091 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
ea1f8d5c 3092 REG_WR_IND(bp, offset, 0);
b6016b76
MC
3093 }
3094 }
3095
3096 /* Load the Read-Only area. */
3097 offset = cpu_reg->spad_base +
3098 (fw->rodata_addr - cpu_reg->mips_view_base);
3099 if (fw->rodata) {
3100 int j;
3101
3102 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3103 REG_WR_IND(bp, offset, fw->rodata[j]);
3104 }
3105 }
3106
3107 /* Clear the pre-fetch instruction. */
3108 REG_WR_IND(bp, cpu_reg->inst, 0);
3109 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
3110
3111 /* Start the CPU. */
3112 val = REG_RD_IND(bp, cpu_reg->mode);
3113 val &= ~cpu_reg->mode_value_halt;
3114 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
3115 REG_WR_IND(bp, cpu_reg->mode, val);
af3ee519
MC
3116
3117 return 0;
b6016b76
MC
3118}
3119
fba9fe91 3120static int
b6016b76
MC
3121bnx2_init_cpus(struct bnx2 *bp)
3122{
3123 struct cpu_reg cpu_reg;
af3ee519 3124 struct fw_info *fw;
110d0ef9
MC
3125 int rc, rv2p_len;
3126 void *text, *rv2p;
b6016b76
MC
3127
3128 /* Initialize the RV2P processor. */
b3448b0b
DV
3129 text = vmalloc(FW_BUF_SIZE);
3130 if (!text)
3131 return -ENOMEM;
110d0ef9
MC
3132 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3133 rv2p = bnx2_xi_rv2p_proc1;
3134 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
3135 } else {
3136 rv2p = bnx2_rv2p_proc1;
3137 rv2p_len = sizeof(bnx2_rv2p_proc1);
3138 }
3139 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
ea1f8d5c 3140 if (rc < 0)
fba9fe91 3141 goto init_cpu_err;
ea1f8d5c 3142
b3448b0b 3143 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
fba9fe91 3144
110d0ef9
MC
3145 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3146 rv2p = bnx2_xi_rv2p_proc2;
3147 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
3148 } else {
3149 rv2p = bnx2_rv2p_proc2;
3150 rv2p_len = sizeof(bnx2_rv2p_proc2);
3151 }
3152 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
ea1f8d5c 3153 if (rc < 0)
fba9fe91 3154 goto init_cpu_err;
ea1f8d5c 3155
b3448b0b 3156 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
b6016b76
MC
3157
3158 /* Initialize the RX Processor. */
3159 cpu_reg.mode = BNX2_RXP_CPU_MODE;
3160 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
3161 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
3162 cpu_reg.state = BNX2_RXP_CPU_STATE;
3163 cpu_reg.state_value_clear = 0xffffff;
3164 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
3165 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
3166 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
3167 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
3168 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
3169 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
3170 cpu_reg.mips_view_base = 0x8000000;
6aa20a22 3171
d43584c8
MC
3172 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3173 fw = &bnx2_rxp_fw_09;
3174 else
3175 fw = &bnx2_rxp_fw_06;
fba9fe91 3176
ea1f8d5c 3177 fw->text = text;
af3ee519 3178 rc = load_cpu_fw(bp, &cpu_reg, fw);
fba9fe91
MC
3179 if (rc)
3180 goto init_cpu_err;
3181
b6016b76
MC
3182 /* Initialize the TX Processor. */
3183 cpu_reg.mode = BNX2_TXP_CPU_MODE;
3184 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
3185 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
3186 cpu_reg.state = BNX2_TXP_CPU_STATE;
3187 cpu_reg.state_value_clear = 0xffffff;
3188 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
3189 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
3190 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
3191 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
3192 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
3193 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
3194 cpu_reg.mips_view_base = 0x8000000;
6aa20a22 3195
d43584c8
MC
3196 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3197 fw = &bnx2_txp_fw_09;
3198 else
3199 fw = &bnx2_txp_fw_06;
fba9fe91 3200
ea1f8d5c 3201 fw->text = text;
af3ee519 3202 rc = load_cpu_fw(bp, &cpu_reg, fw);
fba9fe91
MC
3203 if (rc)
3204 goto init_cpu_err;
3205
b6016b76
MC
3206 /* Initialize the TX Patch-up Processor. */
3207 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
3208 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
3209 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
3210 cpu_reg.state = BNX2_TPAT_CPU_STATE;
3211 cpu_reg.state_value_clear = 0xffffff;
3212 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
3213 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
3214 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
3215 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
3216 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
3217 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
3218 cpu_reg.mips_view_base = 0x8000000;
6aa20a22 3219
d43584c8
MC
3220 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3221 fw = &bnx2_tpat_fw_09;
3222 else
3223 fw = &bnx2_tpat_fw_06;
fba9fe91 3224
ea1f8d5c 3225 fw->text = text;
af3ee519 3226 rc = load_cpu_fw(bp, &cpu_reg, fw);
fba9fe91
MC
3227 if (rc)
3228 goto init_cpu_err;
3229
b6016b76
MC
3230 /* Initialize the Completion Processor. */
3231 cpu_reg.mode = BNX2_COM_CPU_MODE;
3232 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
3233 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
3234 cpu_reg.state = BNX2_COM_CPU_STATE;
3235 cpu_reg.state_value_clear = 0xffffff;
3236 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
3237 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
3238 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
3239 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
3240 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
3241 cpu_reg.spad_base = BNX2_COM_SCRATCH;
3242 cpu_reg.mips_view_base = 0x8000000;
6aa20a22 3243
d43584c8
MC
3244 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3245 fw = &bnx2_com_fw_09;
3246 else
3247 fw = &bnx2_com_fw_06;
fba9fe91 3248
ea1f8d5c 3249 fw->text = text;
af3ee519 3250 rc = load_cpu_fw(bp, &cpu_reg, fw);
fba9fe91
MC
3251 if (rc)
3252 goto init_cpu_err;
3253
d43584c8
MC
3254 /* Initialize the Command Processor. */
3255 cpu_reg.mode = BNX2_CP_CPU_MODE;
3256 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3257 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3258 cpu_reg.state = BNX2_CP_CPU_STATE;
3259 cpu_reg.state_value_clear = 0xffffff;
3260 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3261 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3262 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3263 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3264 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3265 cpu_reg.spad_base = BNX2_CP_SCRATCH;
3266 cpu_reg.mips_view_base = 0x8000000;
b6016b76 3267
110d0ef9 3268 if (CHIP_NUM(bp) == CHIP_NUM_5709)
d43584c8 3269 fw = &bnx2_cp_fw_09;
110d0ef9
MC
3270 else
3271 fw = &bnx2_cp_fw_06;
3272
3273 fw->text = text;
3274 rc = load_cpu_fw(bp, &cpu_reg, fw);
b6016b76 3275
fba9fe91 3276init_cpu_err:
ea1f8d5c 3277 vfree(text);
fba9fe91 3278 return rc;
b6016b76
MC
3279}
3280
/* Transition the device between PCI power states.
 *
 * PCI_D0:    bring the chip back to full power — clear any pending PME
 *            status, delay if coming out of D3hot, and disable the
 *            magic-packet / ACPI wakeup detection in the EMAC and RPM.
 * PCI_D3hot: optionally arm Wake-on-LAN (force 10/100 autoneg on copper,
 *            enable magic-packet mode and accept-all-multicast), notify
 *            the firmware of the suspend reason, then write PMCSR to
 *            drop into D3hot.  No register access is allowed after the
 *            PMCSR write until the device returns to D0.
 *
 * Returns 0 on success, -EINVAL for any other requested state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Select D0 and clear (write-one) any latched PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Ack any received wakeup packets and leave MPKT mode. */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Save autoneg settings; they are temporarily
			 * overridden to renegotiate a low-power link. */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the firmware why we are suspending, unless WoL is
		 * not supported on this board at all. */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* 5706 A0/A1 only enter D3hot (state value 3) when WoL is
		 * armed; later chips always do. */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3417
3418static int
3419bnx2_acquire_nvram_lock(struct bnx2 *bp)
3420{
3421 u32 val;
3422 int j;
3423
3424 /* Request access to the flash interface. */
3425 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3426 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3427 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3428 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3429 break;
3430
3431 udelay(5);
3432 }
3433
3434 if (j >= NVRAM_TIMEOUT_COUNT)
3435 return -EBUSY;
3436
3437 return 0;
3438}
3439
3440static int
3441bnx2_release_nvram_lock(struct bnx2 *bp)
3442{
3443 int j;
3444 u32 val;
3445
3446 /* Relinquish nvram interface. */
3447 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3448
3449 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3450 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3451 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3452 break;
3453
3454 udelay(5);
3455 }
3456
3457 if (j >= NVRAM_TIMEOUT_COUNT)
3458 return -EBUSY;
3459
3460 return 0;
3461}
3462
3463
3464static int
3465bnx2_enable_nvram_write(struct bnx2 *bp)
3466{
3467 u32 val;
3468
3469 val = REG_RD(bp, BNX2_MISC_CFG);
3470 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3471
e30372c9 3472 if (bp->flash_info->flags & BNX2_NV_WREN) {
b6016b76
MC
3473 int j;
3474
3475 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3476 REG_WR(bp, BNX2_NVM_COMMAND,
3477 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3478
3479 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3480 udelay(5);
3481
3482 val = REG_RD(bp, BNX2_NVM_COMMAND);
3483 if (val & BNX2_NVM_COMMAND_DONE)
3484 break;
3485 }
3486
3487 if (j >= NVRAM_TIMEOUT_COUNT)
3488 return -EBUSY;
3489 }
3490 return 0;
3491}
3492
3493static void
3494bnx2_disable_nvram_write(struct bnx2 *bp)
3495{
3496 u32 val;
3497
3498 val = REG_RD(bp, BNX2_MISC_CFG);
3499 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3500}
3501
3502
3503static void
3504bnx2_enable_nvram_access(struct bnx2 *bp)
3505{
3506 u32 val;
3507
3508 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3509 /* Enable both bits, even on read. */
6aa20a22 3510 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
3511 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3512}
3513
3514static void
3515bnx2_disable_nvram_access(struct bnx2 *bp)
3516{
3517 u32 val;
3518
3519 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3520 /* Disable both bits, even after read. */
6aa20a22 3521 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
3522 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3523 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3524}
3525
3526static int
3527bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3528{
3529 u32 cmd;
3530 int j;
3531
e30372c9 3532 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
b6016b76
MC
3533 /* Buffered flash, no erase needed */
3534 return 0;
3535
3536 /* Build an erase command */
3537 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3538 BNX2_NVM_COMMAND_DOIT;
3539
3540 /* Need to clear DONE bit separately. */
3541 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3542
3543 /* Address of the NVRAM to read from. */
3544 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3545
3546 /* Issue an erase command. */
3547 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3548
3549 /* Wait for completion. */
3550 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3551 u32 val;
3552
3553 udelay(5);
3554
3555 val = REG_RD(bp, BNX2_NVM_COMMAND);
3556 if (val & BNX2_NVM_COMMAND_DONE)
3557 break;
3558 }
3559
3560 if (j >= NVRAM_TIMEOUT_COUNT)
3561 return -EBUSY;
3562
3563 return 0;
3564}
3565
3566static int
3567bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3568{
3569 u32 cmd;
3570 int j;
3571
3572 /* Build the command word. */
3573 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3574
e30372c9
MC
3575 /* Calculate an offset of a buffered flash, not needed for 5709. */
3576 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
b6016b76
MC
3577 offset = ((offset / bp->flash_info->page_size) <<
3578 bp->flash_info->page_bits) +
3579 (offset % bp->flash_info->page_size);
3580 }
3581
3582 /* Need to clear DONE bit separately. */
3583 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3584
3585 /* Address of the NVRAM to read from. */
3586 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3587
3588 /* Issue a read command. */
3589 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3590
3591 /* Wait for completion. */
3592 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3593 u32 val;
3594
3595 udelay(5);
3596
3597 val = REG_RD(bp, BNX2_NVM_COMMAND);
3598 if (val & BNX2_NVM_COMMAND_DONE) {
3599 val = REG_RD(bp, BNX2_NVM_READ);
3600
3601 val = be32_to_cpu(val);
3602 memcpy(ret_val, &val, 4);
3603 break;
3604 }
3605 }
3606 if (j >= NVRAM_TIMEOUT_COUNT)
3607 return -EBUSY;
3608
3609 return 0;
3610}
3611
3612
3613static int
3614bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3615{
3616 u32 cmd, val32;
3617 int j;
3618
3619 /* Build the command word. */
3620 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3621
e30372c9
MC
3622 /* Calculate an offset of a buffered flash, not needed for 5709. */
3623 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
b6016b76
MC
3624 offset = ((offset / bp->flash_info->page_size) <<
3625 bp->flash_info->page_bits) +
3626 (offset % bp->flash_info->page_size);
3627 }
3628
3629 /* Need to clear DONE bit separately. */
3630 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3631
3632 memcpy(&val32, val, 4);
3633 val32 = cpu_to_be32(val32);
3634
3635 /* Write the data. */
3636 REG_WR(bp, BNX2_NVM_WRITE, val32);
3637
3638 /* Address of the NVRAM to write to. */
3639 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3640
3641 /* Issue the write command. */
3642 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3643
3644 /* Wait for completion. */
3645 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3646 udelay(5);
3647
3648 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3649 break;
3650 }
3651 if (j >= NVRAM_TIMEOUT_COUNT)
3652 return -EBUSY;
3653
3654 return 0;
3655}
3656
/* Identify the attached flash/EEPROM device and record its geometry in
 * bp->flash_info / bp->flash_size.
 *
 * The 5709 has a single known flash type.  Older chips are matched
 * against flash_table[] by the strapping bits in NVM_CFG1; if the
 * interface has not been reconfigured yet (bit 30 clear), the matching
 * entry's config registers are programmed into the chip.
 *
 * Returns 0 on success, -ENODEV if the device cannot be identified, or
 * the error from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strapping field is valid. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size reported in shared firmware config; fall back
	 * to the table entry's total size when it is zero. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
3739
/* Read @buf_size bytes from NVRAM at @offset into @ret_buf.
 *
 * The hardware transfers whole dwords framed with FIRST/LAST command
 * flags, so an unaligned head and a short tail are each read into a
 * scratch dword and copied partially.  Acquires and releases the NVRAM
 * lock around the whole transfer.  Returns 0 on success or a negative
 * errno from the lock/read helpers.
 */
static int
bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
		int buf_size)
{
	int rc = 0;
	u32 cmd_flags, offset32, len32, extra;

	if (buf_size == 0)
		return 0;

	/* Request access to the flash interface. */
	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
		return rc;

	/* Enable access to flash interface */
	bnx2_enable_nvram_access(bp);

	len32 = buf_size;
	offset32 = offset;
	extra = 0;

	cmd_flags = 0;

	/* Unaligned start: read the containing dword and copy the tail
	 * of it into the caller's buffer. */
	if (offset32 & 3) {
		u8 buf[4];
		u32 pre_len;

		offset32 &= ~3;
		pre_len = 4 - (offset & 3);

		if (pre_len >= len32) {
			pre_len = len32;
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;
		}
		else {
			cmd_flags = BNX2_NVM_COMMAND_FIRST;
		}

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		if (rc)
			return rc;

		memcpy(ret_buf, buf + (offset & 3), pre_len);

		offset32 += 4;
		ret_buf += pre_len;
		len32 -= pre_len;
	}
	/* Round the remaining length up to a dword; 'extra' bytes of the
	 * last dword must not be copied to the caller. */
	if (len32 & 3) {
		extra = 4 - (len32 & 3);
		len32 = (len32 + 4) & ~3;
	}

	if (len32 == 4) {
		u8 buf[4];

		/* LAST only if a FIRST was already issued above. */
		if (cmd_flags)
			cmd_flags = BNX2_NVM_COMMAND_LAST;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}
	else if (len32 > 0) {
		u8 buf[4];

		/* Read the first word. */
		if (cmd_flags)
			cmd_flags = 0;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST;

		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);

		/* Advance to the next dword. */
		offset32 += 4;
		ret_buf += 4;
		len32 -= 4;

		while (len32 > 4 && rc == 0) {
			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);

			/* Advance to the next dword. */
			offset32 += 4;
			ret_buf += 4;
			len32 -= 4;
		}

		if (rc)
			return rc;

		/* Final dword via scratch buffer so trailing 'extra'
		 * bytes are dropped. */
		cmd_flags = BNX2_NVM_COMMAND_LAST;
		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}

	/* Disable access to flash interface */
	bnx2_disable_nvram_access(bp);

	bnx2_release_nvram_lock(bp);

	return rc;
}
3849
/* Write @buf_size bytes from @data_buf to NVRAM at @offset.
 *
 * Unaligned head/tail bytes are merged with existing flash contents
 * read into @start/@end, building a dword-aligned staging copy in
 * @align_buf.  The write then proceeds one flash page at a time; for
 * non-buffered parts each page is read into @flash_buffer, erased, and
 * rewritten (preserving the bytes outside the target range).  The NVRAM
 * lock and write-enable are acquired and dropped around every page.
 *
 * Returns 0 on success or a negative errno (-ENOMEM or a helper error).
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: widen the range and fetch the existing dword
	 * so the bytes before 'offset' are preserved. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: widen to a dword and fetch the existing tail. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Build an aligned staging copy combining old edge bytes with
	 * the caller's data. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered flash needs a page-sized bounce buffer for the
	 * read-erase-rewrite cycle (264 bytes covers the page). */
	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4029
0d8a6571
MC
4030static void
4031bnx2_init_remote_phy(struct bnx2 *bp)
4032{
4033 u32 val;
4034
4035 bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
4036 if (!(bp->phy_flags & PHY_SERDES_FLAG))
4037 return;
4038
4039 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
4040 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4041 return;
4042
4043 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
0d8a6571
MC
4044 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
4045
4046 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
4047 if (val & BNX2_LINK_STATUS_SERDES_LINK)
4048 bp->phy_port = PORT_FIBRE;
4049 else
4050 bp->phy_port = PORT_TP;
489310a4
MC
4051
4052 if (netif_running(bp->dev)) {
4053 u32 sig;
4054
4055 if (val & BNX2_LINK_STATUS_LINK_UP) {
4056 bp->link_up = 1;
4057 netif_carrier_on(bp->dev);
4058 } else {
4059 bp->link_up = 0;
4060 netif_carrier_off(bp->dev);
4061 }
4062 sig = BNX2_DRV_ACK_CAP_SIGNATURE |
4063 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4064 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
4065 sig);
4066 }
0d8a6571
MC
4067 }
4068}
4069
b6016b76
MC
/* Perform a soft reset of the chip.
 *
 * Quiesces DMA, synchronizes with the firmware before and after the
 * reset, issues the chip-appropriate reset (MISC_COMMAND on the 5709,
 * PCICFG core reset on older chips), verifies endian configuration,
 * re-detects the remote PHY, and applies 5706 A0 workarounds.
 * @reset_code is the BNX2_DRV_MSG_CODE_* reason passed to firmware.
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
		   BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 resets through MISC_COMMAND; the read flushes the
		 * posted write. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	/* Re-detect remote PHY support; update default link settings if
	 * the port type changed across the reset. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_remote_phy(bp);
	if ((bp->phy_flags & REMOTE_PHY_CAP_FLAG) && old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	return rc;
}
4171
4172static int
4173bnx2_init_chip(struct bnx2 *bp)
4174{
4175 u32 val;
b090ae2b 4176 int rc;
b6016b76
MC
4177
4178 /* Make sure the interrupt is not active. */
4179 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4180
4181 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4182 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4183#ifdef __BIG_ENDIAN
6aa20a22 4184 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
b6016b76 4185#endif
6aa20a22 4186 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
b6016b76
MC
4187 DMA_READ_CHANS << 12 |
4188 DMA_WRITE_CHANS << 16;
4189
4190 val |= (0x2 << 20) | (1 << 11);
4191
dda1e390 4192 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
b6016b76
MC
4193 val |= (1 << 23);
4194
4195 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4196 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
4197 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4198
4199 REG_WR(bp, BNX2_DMA_CONFIG, val);
4200
4201 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4202 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4203 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4204 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4205 }
4206
4207 if (bp->flags & PCIX_FLAG) {
4208 u16 val16;
4209
4210 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4211 &val16);
4212 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4213 val16 & ~PCI_X_CMD_ERO);
4214 }
4215
4216 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4217 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4218 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4219 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4220
4221 /* Initialize context mapping and zero out the quick contexts. The
4222 * context block must have already been enabled. */
641bdcd5
MC
4223 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4224 rc = bnx2_init_5709_context(bp);
4225 if (rc)
4226 return rc;
4227 } else
59b47d8a 4228 bnx2_init_context(bp);
b6016b76 4229
fba9fe91
MC
4230 if ((rc = bnx2_init_cpus(bp)) != 0)
4231 return rc;
4232
b6016b76
MC
4233 bnx2_init_nvram(bp);
4234
4235 bnx2_set_mac_addr(bp);
4236
4237 val = REG_RD(bp, BNX2_MQ_CONFIG);
4238 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4239 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
68c9f75a
MC
4240 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4241 val |= BNX2_MQ_CONFIG_HALT_DIS;
4242
b6016b76
MC
4243 REG_WR(bp, BNX2_MQ_CONFIG, val);
4244
4245 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4246 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4247 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4248
4249 val = (BCM_PAGE_BITS - 8) << 24;
4250 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4251
4252 /* Configure page size. */
4253 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4254 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4255 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4256 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4257
4258 val = bp->mac_addr[0] +
4259 (bp->mac_addr[1] << 8) +
4260 (bp->mac_addr[2] << 16) +
4261 bp->mac_addr[3] +
4262 (bp->mac_addr[4] << 8) +
4263 (bp->mac_addr[5] << 16);
4264 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4265
4266 /* Program the MTU. Also include 4 bytes for CRC32. */
4267 val = bp->dev->mtu + ETH_HLEN + 4;
4268 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4269 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4270 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4271
35efa7c1 4272 bp->bnx2_napi.last_status_idx = 0;
b6016b76
MC
4273 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4274
4275 /* Set up how to generate a link change interrupt. */
4276 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4277
4278 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4279 (u64) bp->status_blk_mapping & 0xffffffff);
4280 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4281
4282 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4283 (u64) bp->stats_blk_mapping & 0xffffffff);
4284 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4285 (u64) bp->stats_blk_mapping >> 32);
4286
6aa20a22 4287 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
b6016b76
MC
4288 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4289
4290 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4291 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4292
4293 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4294 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4295
4296 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4297
4298 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4299
4300 REG_WR(bp, BNX2_HC_COM_TICKS,
4301 (bp->com_ticks_int << 16) | bp->com_ticks);
4302
4303 REG_WR(bp, BNX2_HC_CMD_TICKS,
4304 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4305
02537b06
MC
4306 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4307 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4308 else
7ea6920e 4309 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
b6016b76
MC
4310 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4311
4312 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
8e6a72c4 4313 val = BNX2_HC_CONFIG_COLLECT_STATS;
b6016b76 4314 else {
8e6a72c4
MC
4315 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4316 BNX2_HC_CONFIG_COLLECT_STATS;
b6016b76
MC
4317 }
4318
8e6a72c4
MC
4319 if (bp->flags & ONE_SHOT_MSI_FLAG)
4320 val |= BNX2_HC_CONFIG_ONE_SHOT;
4321
4322 REG_WR(bp, BNX2_HC_CONFIG, val);
4323
b6016b76
MC
4324 /* Clear internal stats counters. */
4325 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4326
da3e4fbe 4327 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
b6016b76
MC
4328
4329 /* Initialize the receive filter. */
4330 bnx2_set_rx_mode(bp->dev);
4331
0aa38df7
MC
4332 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4333 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4334 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4335 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4336 }
b090ae2b
MC
4337 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4338 0);
b6016b76 4339
df149d70 4340 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
b6016b76
MC
4341 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4342
4343 udelay(20);
4344
bf5295bb
MC
4345 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4346
b090ae2b 4347 return rc;
b6016b76
MC
4348}
4349
59b47d8a
MC
4350static void
4351bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4352{
4353 u32 val, offset0, offset1, offset2, offset3;
4354
4355 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4356 offset0 = BNX2_L2CTX_TYPE_XI;
4357 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4358 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4359 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4360 } else {
4361 offset0 = BNX2_L2CTX_TYPE;
4362 offset1 = BNX2_L2CTX_CMD_TYPE;
4363 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4364 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4365 }
4366 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4367 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4368
4369 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4370 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4371
4372 val = (u64) bp->tx_desc_mapping >> 32;
4373 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4374
4375 val = (u64) bp->tx_desc_mapping & 0xffffffff;
4376 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4377}
b6016b76
MC
4378
4379static void
4380bnx2_init_tx_ring(struct bnx2 *bp)
4381{
4382 struct tx_bd *txbd;
59b47d8a 4383 u32 cid;
a550c99b 4384 struct bnx2_napi *bnapi = &bp->bnx2_napi;
b6016b76 4385
2f8af120
MC
4386 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4387
b6016b76 4388 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
6aa20a22 4389
b6016b76
MC
4390 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4391 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4392
4393 bp->tx_prod = 0;
a550c99b
MC
4394 bnapi->tx_cons = 0;
4395 bnapi->hw_tx_cons = 0;
b6016b76 4396 bp->tx_prod_bseq = 0;
6aa20a22 4397
59b47d8a
MC
4398 cid = TX_CID;
4399 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4400 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
b6016b76 4401
59b47d8a 4402 bnx2_init_tx_context(bp, cid);
b6016b76
MC
4403}
4404
4405static void
5d5d0015
MC
4406bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4407 int num_rings)
b6016b76 4408{
b6016b76 4409 int i;
5d5d0015 4410 struct rx_bd *rxbd;
6aa20a22 4411
5d5d0015 4412 for (i = 0; i < num_rings; i++) {
13daffa2 4413 int j;
b6016b76 4414
5d5d0015 4415 rxbd = &rx_ring[i][0];
13daffa2 4416 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5d5d0015 4417 rxbd->rx_bd_len = buf_size;
13daffa2
MC
4418 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4419 }
5d5d0015 4420 if (i == (num_rings - 1))
13daffa2
MC
4421 j = 0;
4422 else
4423 j = i + 1;
5d5d0015
MC
4424 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4425 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
13daffa2 4426 }
5d5d0015
MC
4427}
4428
4429static void
4430bnx2_init_rx_ring(struct bnx2 *bp)
4431{
4432 int i;
4433 u16 prod, ring_prod;
4434 u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
4435
4436 bp->rx_prod = 0;
4437 bp->rx_cons = 0;
4438 bp->rx_prod_bseq = 0;
47bf4246
MC
4439 bp->rx_pg_prod = 0;
4440 bp->rx_pg_cons = 0;
5d5d0015
MC
4441
4442 bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping,
4443 bp->rx_buf_use_size, bp->rx_max_ring);
4444
4445 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
47bf4246
MC
4446 if (bp->rx_pg_ring_size) {
4447 bnx2_init_rxbd_rings(bp->rx_pg_desc_ring,
4448 bp->rx_pg_desc_mapping,
4449 PAGE_SIZE, bp->rx_max_pg_ring);
4450 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
4451 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
4452 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
4453 BNX2_L2CTX_RBDC_JUMBO_KEY);
4454
4455 val = (u64) bp->rx_pg_desc_mapping[0] >> 32;
4456 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
4457
4458 val = (u64) bp->rx_pg_desc_mapping[0] & 0xffffffff;
4459 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
4460
4461 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4462 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
4463 }
b6016b76
MC
4464
4465 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
4466 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
4467 val |= 0x02 << 8;
5d5d0015 4468 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
b6016b76 4469
13daffa2 4470 val = (u64) bp->rx_desc_mapping[0] >> 32;
5d5d0015 4471 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
b6016b76 4472
13daffa2 4473 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
5d5d0015 4474 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
b6016b76 4475
47bf4246
MC
4476 ring_prod = prod = bp->rx_pg_prod;
4477 for (i = 0; i < bp->rx_pg_ring_size; i++) {
4478 if (bnx2_alloc_rx_page(bp, ring_prod) < 0)
4479 break;
4480 prod = NEXT_RX_BD(prod);
4481 ring_prod = RX_PG_RING_IDX(prod);
4482 }
4483 bp->rx_pg_prod = prod;
4484
5d5d0015 4485 ring_prod = prod = bp->rx_prod;
236b6394 4486 for (i = 0; i < bp->rx_ring_size; i++) {
b6016b76
MC
4487 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
4488 break;
4489 }
4490 prod = NEXT_RX_BD(prod);
4491 ring_prod = RX_RING_IDX(prod);
4492 }
4493 bp->rx_prod = prod;
4494
47bf4246 4495 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX, bp->rx_pg_prod);
b6016b76
MC
4496 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
4497
4498 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
4499}
4500
5d5d0015 4501static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
13daffa2 4502{
5d5d0015 4503 u32 max, num_rings = 1;
13daffa2 4504
5d5d0015
MC
4505 while (ring_size > MAX_RX_DESC_CNT) {
4506 ring_size -= MAX_RX_DESC_CNT;
13daffa2
MC
4507 num_rings++;
4508 }
4509 /* round to next power of 2 */
5d5d0015 4510 max = max_size;
13daffa2
MC
4511 while ((max & num_rings) == 0)
4512 max >>= 1;
4513
4514 if (num_rings != max)
4515 max <<= 1;
4516
5d5d0015
MC
4517 return max;
4518}
4519
4520static void
4521bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4522{
84eaa187 4523 u32 rx_size, rx_space, jumbo_size;
5d5d0015
MC
4524
4525 /* 8 for CRC and VLAN */
4526 rx_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
4527
84eaa187
MC
4528 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
4529 sizeof(struct skb_shared_info);
4530
5d5d0015 4531 bp->rx_copy_thresh = RX_COPY_THRESH;
47bf4246
MC
4532 bp->rx_pg_ring_size = 0;
4533 bp->rx_max_pg_ring = 0;
4534 bp->rx_max_pg_ring_idx = 0;
84eaa187
MC
4535 if (rx_space > PAGE_SIZE) {
4536 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4537
4538 jumbo_size = size * pages;
4539 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
4540 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
4541
4542 bp->rx_pg_ring_size = jumbo_size;
4543 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
4544 MAX_RX_PG_RINGS);
4545 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
4546 rx_size = RX_COPY_THRESH + bp->rx_offset;
4547 bp->rx_copy_thresh = 0;
4548 }
5d5d0015
MC
4549
4550 bp->rx_buf_use_size = rx_size;
4551 /* hw alignment */
4552 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
1db82f2a 4553 bp->rx_jumbo_thresh = rx_size - bp->rx_offset;
5d5d0015
MC
4554 bp->rx_ring_size = size;
4555 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
13daffa2
MC
4556 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4557}
4558
b6016b76
MC
4559static void
4560bnx2_free_tx_skbs(struct bnx2 *bp)
4561{
4562 int i;
4563
4564 if (bp->tx_buf_ring == NULL)
4565 return;
4566
4567 for (i = 0; i < TX_DESC_CNT; ) {
4568 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
4569 struct sk_buff *skb = tx_buf->skb;
4570 int j, last;
4571
4572 if (skb == NULL) {
4573 i++;
4574 continue;
4575 }
4576
4577 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
4578 skb_headlen(skb), PCI_DMA_TODEVICE);
4579
4580 tx_buf->skb = NULL;
4581
4582 last = skb_shinfo(skb)->nr_frags;
4583 for (j = 0; j < last; j++) {
4584 tx_buf = &bp->tx_buf_ring[i + j + 1];
4585 pci_unmap_page(bp->pdev,
4586 pci_unmap_addr(tx_buf, mapping),
4587 skb_shinfo(skb)->frags[j].size,
4588 PCI_DMA_TODEVICE);
4589 }
745720e5 4590 dev_kfree_skb(skb);
b6016b76
MC
4591 i += j + 1;
4592 }
4593
4594}
4595
4596static void
4597bnx2_free_rx_skbs(struct bnx2 *bp)
4598{
4599 int i;
4600
4601 if (bp->rx_buf_ring == NULL)
4602 return;
4603
13daffa2 4604 for (i = 0; i < bp->rx_max_ring_idx; i++) {
b6016b76
MC
4605 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4606 struct sk_buff *skb = rx_buf->skb;
4607
05d0f1cf 4608 if (skb == NULL)
b6016b76
MC
4609 continue;
4610
4611 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4612 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4613
4614 rx_buf->skb = NULL;
4615
745720e5 4616 dev_kfree_skb(skb);
b6016b76 4617 }
47bf4246
MC
4618 for (i = 0; i < bp->rx_max_pg_ring_idx; i++)
4619 bnx2_free_rx_page(bp, i);
b6016b76
MC
4620}
4621
/* Release all socket buffers held by the driver, TX ring first. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
4628
4629static int
4630bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4631{
4632 int rc;
4633
4634 rc = bnx2_reset_chip(bp, reset_code);
4635 bnx2_free_skbs(bp);
4636 if (rc)
4637 return rc;
4638
fba9fe91
MC
4639 if ((rc = bnx2_init_chip(bp)) != 0)
4640 return rc;
4641
b6016b76
MC
4642 bnx2_init_tx_ring(bp);
4643 bnx2_init_rx_ring(bp);
4644 return 0;
4645}
4646
4647static int
4648bnx2_init_nic(struct bnx2 *bp)
4649{
4650 int rc;
4651
4652 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4653 return rc;
4654
80be4434 4655 spin_lock_bh(&bp->phy_lock);
b6016b76
MC
4656 bnx2_init_phy(bp);
4657 bnx2_set_link(bp);
0d8a6571 4658 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
4659 return 0;
4660}
4661
4662static int
4663bnx2_test_registers(struct bnx2 *bp)
4664{
4665 int ret;
5bae30c9 4666 int i, is_5709;
f71e1309 4667 static const struct {
b6016b76
MC
4668 u16 offset;
4669 u16 flags;
5bae30c9 4670#define BNX2_FL_NOT_5709 1
b6016b76
MC
4671 u32 rw_mask;
4672 u32 ro_mask;
4673 } reg_tbl[] = {
4674 { 0x006c, 0, 0x00000000, 0x0000003f },
4675 { 0x0090, 0, 0xffffffff, 0x00000000 },
4676 { 0x0094, 0, 0x00000000, 0x00000000 },
4677
5bae30c9
MC
4678 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4679 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4680 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4681 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4682 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4683 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4684 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4685 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4686 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4687
4688 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4689 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4690 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4691 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4692 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4693 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4694
4695 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4696 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4697 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
b6016b76
MC
4698
4699 { 0x1000, 0, 0x00000000, 0x00000001 },
4700 { 0x1004, 0, 0x00000000, 0x000f0001 },
b6016b76
MC
4701
4702 { 0x1408, 0, 0x01c00800, 0x00000000 },
4703 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4704 { 0x14a8, 0, 0x00000000, 0x000001ff },
5b0c76ad 4705 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
b6016b76
MC
4706 { 0x14b0, 0, 0x00000002, 0x00000001 },
4707 { 0x14b8, 0, 0x00000000, 0x00000000 },
4708 { 0x14c0, 0, 0x00000000, 0x00000009 },
4709 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4710 { 0x14cc, 0, 0x00000000, 0x00000001 },
4711 { 0x14d0, 0, 0xffffffff, 0x00000000 },
b6016b76
MC
4712
4713 { 0x1800, 0, 0x00000000, 0x00000001 },
4714 { 0x1804, 0, 0x00000000, 0x00000003 },
b6016b76
MC
4715
4716 { 0x2800, 0, 0x00000000, 0x00000001 },
4717 { 0x2804, 0, 0x00000000, 0x00003f01 },
4718 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4719 { 0x2810, 0, 0xffff0000, 0x00000000 },
4720 { 0x2814, 0, 0xffff0000, 0x00000000 },
4721 { 0x2818, 0, 0xffff0000, 0x00000000 },
4722 { 0x281c, 0, 0xffff0000, 0x00000000 },
4723 { 0x2834, 0, 0xffffffff, 0x00000000 },
4724 { 0x2840, 0, 0x00000000, 0xffffffff },
4725 { 0x2844, 0, 0x00000000, 0xffffffff },
4726 { 0x2848, 0, 0xffffffff, 0x00000000 },
4727 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4728
4729 { 0x2c00, 0, 0x00000000, 0x00000011 },
4730 { 0x2c04, 0, 0x00000000, 0x00030007 },
4731
b6016b76
MC
4732 { 0x3c00, 0, 0x00000000, 0x00000001 },
4733 { 0x3c04, 0, 0x00000000, 0x00070000 },
4734 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4735 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4736 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4737 { 0x3c14, 0, 0x00000000, 0xffffffff },
4738 { 0x3c18, 0, 0x00000000, 0xffffffff },
4739 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4740 { 0x3c20, 0, 0xffffff00, 0x00000000 },
b6016b76
MC
4741
4742 { 0x5004, 0, 0x00000000, 0x0000007f },
4743 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
b6016b76 4744
b6016b76
MC
4745 { 0x5c00, 0, 0x00000000, 0x00000001 },
4746 { 0x5c04, 0, 0x00000000, 0x0003000f },
4747 { 0x5c08, 0, 0x00000003, 0x00000000 },
4748 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4749 { 0x5c10, 0, 0x00000000, 0xffffffff },
4750 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4751 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4752 { 0x5c88, 0, 0x00000000, 0x00077373 },
4753 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4754
4755 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4756 { 0x680c, 0, 0xffffffff, 0x00000000 },
4757 { 0x6810, 0, 0xffffffff, 0x00000000 },
4758 { 0x6814, 0, 0xffffffff, 0x00000000 },
4759 { 0x6818, 0, 0xffffffff, 0x00000000 },
4760 { 0x681c, 0, 0xffffffff, 0x00000000 },
4761 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4762 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4763 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4764 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4765 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4766 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4767 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4768 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4769 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4770 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4771 { 0x684c, 0, 0xffffffff, 0x00000000 },
4772 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4773 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4774 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4775 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4776 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4777 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4778
4779 { 0xffff, 0, 0x00000000, 0x00000000 },
4780 };
4781
4782 ret = 0;
5bae30c9
MC
4783 is_5709 = 0;
4784 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4785 is_5709 = 1;
4786
b6016b76
MC
4787 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4788 u32 offset, rw_mask, ro_mask, save_val, val;
5bae30c9
MC
4789 u16 flags = reg_tbl[i].flags;
4790
4791 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4792 continue;
b6016b76
MC
4793
4794 offset = (u32) reg_tbl[i].offset;
4795 rw_mask = reg_tbl[i].rw_mask;
4796 ro_mask = reg_tbl[i].ro_mask;
4797
14ab9b86 4798 save_val = readl(bp->regview + offset);
b6016b76 4799
14ab9b86 4800 writel(0, bp->regview + offset);
b6016b76 4801
14ab9b86 4802 val = readl(bp->regview + offset);
b6016b76
MC
4803 if ((val & rw_mask) != 0) {
4804 goto reg_test_err;
4805 }
4806
4807 if ((val & ro_mask) != (save_val & ro_mask)) {
4808 goto reg_test_err;
4809 }
4810
14ab9b86 4811 writel(0xffffffff, bp->regview + offset);
b6016b76 4812
14ab9b86 4813 val = readl(bp->regview + offset);
b6016b76
MC
4814 if ((val & rw_mask) != rw_mask) {
4815 goto reg_test_err;
4816 }
4817
4818 if ((val & ro_mask) != (save_val & ro_mask)) {
4819 goto reg_test_err;
4820 }
4821
14ab9b86 4822 writel(save_val, bp->regview + offset);
b6016b76
MC
4823 continue;
4824
4825reg_test_err:
14ab9b86 4826 writel(save_val, bp->regview + offset);
b6016b76
MC
4827 ret = -ENODEV;
4828 break;
4829 }
4830 return ret;
4831}
4832
4833static int
4834bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4835{
f71e1309 4836 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
b6016b76
MC
4837 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4838 int i;
4839
4840 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4841 u32 offset;
4842
4843 for (offset = 0; offset < size; offset += 4) {
4844
4845 REG_WR_IND(bp, start + offset, test_pattern[i]);
4846
4847 if (REG_RD_IND(bp, start + offset) !=
4848 test_pattern[i]) {
4849 return -ENODEV;
4850 }
4851 }
4852 }
4853 return 0;
4854}
4855
4856static int
4857bnx2_test_memory(struct bnx2 *bp)
4858{
4859 int ret = 0;
4860 int i;
5bae30c9 4861 static struct mem_entry {
b6016b76
MC
4862 u32 offset;
4863 u32 len;
5bae30c9 4864 } mem_tbl_5706[] = {
b6016b76 4865 { 0x60000, 0x4000 },
5b0c76ad 4866 { 0xa0000, 0x3000 },
b6016b76
MC
4867 { 0xe0000, 0x4000 },
4868 { 0x120000, 0x4000 },
4869 { 0x1a0000, 0x4000 },
4870 { 0x160000, 0x4000 },
4871 { 0xffffffff, 0 },
5bae30c9
MC
4872 },
4873 mem_tbl_5709[] = {
4874 { 0x60000, 0x4000 },
4875 { 0xa0000, 0x3000 },
4876 { 0xe0000, 0x4000 },
4877 { 0x120000, 0x4000 },
4878 { 0x1a0000, 0x4000 },
4879 { 0xffffffff, 0 },
b6016b76 4880 };
5bae30c9
MC
4881 struct mem_entry *mem_tbl;
4882
4883 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4884 mem_tbl = mem_tbl_5709;
4885 else
4886 mem_tbl = mem_tbl_5706;
b6016b76
MC
4887
4888 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4889 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4890 mem_tbl[i].len)) != 0) {
4891 return ret;
4892 }
4893 }
6aa20a22 4894
b6016b76
MC
4895 return ret;
4896}
4897
bc5a0690
MC
4898#define BNX2_MAC_LOOPBACK 0
4899#define BNX2_PHY_LOOPBACK 1
4900
b6016b76 4901static int
bc5a0690 4902bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
b6016b76
MC
4903{
4904 unsigned int pkt_size, num_pkts, i;
4905 struct sk_buff *skb, *rx_skb;
4906 unsigned char *packet;
bc5a0690 4907 u16 rx_start_idx, rx_idx;
b6016b76
MC
4908 dma_addr_t map;
4909 struct tx_bd *txbd;
4910 struct sw_bd *rx_buf;
4911 struct l2_fhdr *rx_hdr;
4912 int ret = -ENODEV;
35efa7c1 4913 struct bnx2_napi *bnapi = &bp->bnx2_napi;
b6016b76 4914
bc5a0690
MC
4915 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4916 bp->loopback = MAC_LOOPBACK;
4917 bnx2_set_mac_loopback(bp);
4918 }
4919 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
489310a4
MC
4920 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
4921 return 0;
4922
80be4434 4923 bp->loopback = PHY_LOOPBACK;
bc5a0690
MC
4924 bnx2_set_phy_loopback(bp);
4925 }
4926 else
4927 return -EINVAL;
b6016b76 4928
84eaa187 4929 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
932f3772 4930 skb = netdev_alloc_skb(bp->dev, pkt_size);
b6cbc3b6
JL
4931 if (!skb)
4932 return -ENOMEM;
b6016b76 4933 packet = skb_put(skb, pkt_size);
6634292b 4934 memcpy(packet, bp->dev->dev_addr, 6);
b6016b76
MC
4935 memset(packet + 6, 0x0, 8);
4936 for (i = 14; i < pkt_size; i++)
4937 packet[i] = (unsigned char) (i & 0xff);
4938
4939 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4940 PCI_DMA_TODEVICE);
4941
bf5295bb
MC
4942 REG_WR(bp, BNX2_HC_COMMAND,
4943 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4944
b6016b76
MC
4945 REG_RD(bp, BNX2_HC_COMMAND);
4946
4947 udelay(5);
35efa7c1 4948 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
b6016b76 4949
b6016b76
MC
4950 num_pkts = 0;
4951
bc5a0690 4952 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
b6016b76
MC
4953
4954 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4955 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4956 txbd->tx_bd_mss_nbytes = pkt_size;
4957 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4958
4959 num_pkts++;
bc5a0690
MC
4960 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4961 bp->tx_prod_bseq += pkt_size;
b6016b76 4962
234754d5
MC
4963 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4964 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
b6016b76
MC
4965
4966 udelay(100);
4967
bf5295bb
MC
4968 REG_WR(bp, BNX2_HC_COMMAND,
4969 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4970
b6016b76
MC
4971 REG_RD(bp, BNX2_HC_COMMAND);
4972
4973 udelay(5);
4974
4975 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
745720e5 4976 dev_kfree_skb(skb);
b6016b76 4977
35efa7c1 4978 if (bnx2_get_hw_tx_cons(bnapi) != bp->tx_prod)
b6016b76 4979 goto loopback_test_done;
b6016b76 4980
35efa7c1 4981 rx_idx = bnx2_get_hw_rx_cons(bnapi);
b6016b76
MC
4982 if (rx_idx != rx_start_idx + num_pkts) {
4983 goto loopback_test_done;
4984 }
4985
4986 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4987 rx_skb = rx_buf->skb;
4988
4989 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4990 skb_reserve(rx_skb, bp->rx_offset);
4991
4992 pci_dma_sync_single_for_cpu(bp->pdev,
4993 pci_unmap_addr(rx_buf, mapping),
4994 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4995
ade2bfe7 4996 if (rx_hdr->l2_fhdr_status &
b6016b76
MC
4997 (L2_FHDR_ERRORS_BAD_CRC |
4998 L2_FHDR_ERRORS_PHY_DECODE |
4999 L2_FHDR_ERRORS_ALIGNMENT |
5000 L2_FHDR_ERRORS_TOO_SHORT |
5001 L2_FHDR_ERRORS_GIANT_FRAME)) {
5002
5003 goto loopback_test_done;
5004 }
5005
5006 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5007 goto loopback_test_done;
5008 }
5009
5010 for (i = 14; i < pkt_size; i++) {
5011 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5012 goto loopback_test_done;
5013 }
5014 }
5015
5016 ret = 0;
5017
5018loopback_test_done:
5019 bp->loopback = 0;
5020 return ret;
5021}
5022
bc5a0690
MC
5023#define BNX2_MAC_LOOPBACK_FAILED 1
5024#define BNX2_PHY_LOOPBACK_FAILED 2
5025#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5026 BNX2_PHY_LOOPBACK_FAILED)
5027
5028static int
5029bnx2_test_loopback(struct bnx2 *bp)
5030{
5031 int rc = 0;
5032
5033 if (!netif_running(bp->dev))
5034 return BNX2_LOOPBACK_FAILED;
5035
5036 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5037 spin_lock_bh(&bp->phy_lock);
5038 bnx2_init_phy(bp);
5039 spin_unlock_bh(&bp->phy_lock);
5040 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5041 rc |= BNX2_MAC_LOOPBACK_FAILED;
5042 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5043 rc |= BNX2_PHY_LOOPBACK_FAILED;
5044 return rc;
5045}
5046
b6016b76
MC
5047#define NVRAM_SIZE 0x200
5048#define CRC32_RESIDUAL 0xdebb20e3
5049
5050static int
5051bnx2_test_nvram(struct bnx2 *bp)
5052{
5053 u32 buf[NVRAM_SIZE / 4];
5054 u8 *data = (u8 *) buf;
5055 int rc = 0;
5056 u32 magic, csum;
5057
5058 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5059 goto test_nvram_done;
5060
5061 magic = be32_to_cpu(buf[0]);
5062 if (magic != 0x669955aa) {
5063 rc = -ENODEV;
5064 goto test_nvram_done;
5065 }
5066
5067 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5068 goto test_nvram_done;
5069
5070 csum = ether_crc_le(0x100, data);
5071 if (csum != CRC32_RESIDUAL) {
5072 rc = -ENODEV;
5073 goto test_nvram_done;
5074 }
5075
5076 csum = ether_crc_le(0x100, data + 0x100);
5077 if (csum != CRC32_RESIDUAL) {
5078 rc = -ENODEV;
5079 }
5080
5081test_nvram_done:
5082 return rc;
5083}
5084
5085static int
5086bnx2_test_link(struct bnx2 *bp)
5087{
5088 u32 bmsr;
5089
489310a4
MC
5090 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5091 if (bp->link_up)
5092 return 0;
5093 return -ENODEV;
5094 }
c770a65c 5095 spin_lock_bh(&bp->phy_lock);
27a005b8
MC
5096 bnx2_enable_bmsr1(bp);
5097 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5098 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5099 bnx2_disable_bmsr1(bp);
c770a65c 5100 spin_unlock_bh(&bp->phy_lock);
6aa20a22 5101
b6016b76
MC
5102 if (bmsr & BMSR_LSTATUS) {
5103 return 0;
5104 }
5105 return -ENODEV;
5106}
5107
5108static int
5109bnx2_test_intr(struct bnx2 *bp)
5110{
5111 int i;
b6016b76
MC
5112 u16 status_idx;
5113
5114 if (!netif_running(bp->dev))
5115 return -ENODEV;
5116
5117 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5118
5119 /* This register is not touched during run-time. */
bf5295bb 5120 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
b6016b76
MC
5121 REG_RD(bp, BNX2_HC_COMMAND);
5122
5123 for (i = 0; i < 10; i++) {
5124 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5125 status_idx) {
5126
5127 break;
5128 }
5129
5130 msleep_interruptible(10);
5131 }
5132 if (i < 10)
5133 return 0;
5134
5135 return -ENODEV;
5136}
5137
5138static void
48b01e2d 5139bnx2_5706_serdes_timer(struct bnx2 *bp)
b6016b76 5140{
48b01e2d
MC
5141 spin_lock(&bp->phy_lock);
5142 if (bp->serdes_an_pending)
5143 bp->serdes_an_pending--;
5144 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5145 u32 bmcr;
b6016b76 5146
48b01e2d 5147 bp->current_interval = bp->timer_interval;
cd339a0e 5148
ca58c3af 5149 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76 5150
48b01e2d
MC
5151 if (bmcr & BMCR_ANENABLE) {
5152 u32 phy1, phy2;
b6016b76 5153
48b01e2d
MC
5154 bnx2_write_phy(bp, 0x1c, 0x7c00);
5155 bnx2_read_phy(bp, 0x1c, &phy1);
cea94db9 5156
48b01e2d
MC
5157 bnx2_write_phy(bp, 0x17, 0x0f01);
5158 bnx2_read_phy(bp, 0x15, &phy2);
5159 bnx2_write_phy(bp, 0x17, 0x0f01);
5160 bnx2_read_phy(bp, 0x15, &phy2);
b6016b76 5161
48b01e2d
MC
5162 if ((phy1 & 0x10) && /* SIGNAL DETECT */
5163 !(phy2 & 0x20)) { /* no CONFIG */
5164
5165 bmcr &= ~BMCR_ANENABLE;
5166 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
ca58c3af 5167 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
48b01e2d
MC
5168 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
5169 }
b6016b76 5170 }
48b01e2d
MC
5171 }
5172 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
5173 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
5174 u32 phy2;
b6016b76 5175
48b01e2d
MC
5176 bnx2_write_phy(bp, 0x17, 0x0f01);
5177 bnx2_read_phy(bp, 0x15, &phy2);
5178 if (phy2 & 0x20) {
5179 u32 bmcr;
cd339a0e 5180
ca58c3af 5181 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
48b01e2d 5182 bmcr |= BMCR_ANENABLE;
ca58c3af 5183 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
b6016b76 5184
48b01e2d
MC
5185 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
5186 }
5187 } else
5188 bp->current_interval = bp->timer_interval;
b6016b76 5189
48b01e2d
MC
5190 spin_unlock(&bp->phy_lock);
5191}
b6016b76 5192
f8dd064e
MC
5193static void
5194bnx2_5708_serdes_timer(struct bnx2 *bp)
5195{
0d8a6571
MC
5196 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
5197 return;
5198
f8dd064e
MC
5199 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
5200 bp->serdes_an_pending = 0;
5201 return;
5202 }
b6016b76 5203
f8dd064e
MC
5204 spin_lock(&bp->phy_lock);
5205 if (bp->serdes_an_pending)
5206 bp->serdes_an_pending--;
5207 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5208 u32 bmcr;
b6016b76 5209
ca58c3af 5210 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
f8dd064e 5211 if (bmcr & BMCR_ANENABLE) {
605a9e20 5212 bnx2_enable_forced_2g5(bp);
f8dd064e
MC
5213 bp->current_interval = SERDES_FORCED_TIMEOUT;
5214 } else {
605a9e20 5215 bnx2_disable_forced_2g5(bp);
f8dd064e
MC
5216 bp->serdes_an_pending = 2;
5217 bp->current_interval = bp->timer_interval;
b6016b76 5218 }
b6016b76 5219
f8dd064e
MC
5220 } else
5221 bp->current_interval = bp->timer_interval;
b6016b76 5222
f8dd064e
MC
5223 spin_unlock(&bp->phy_lock);
5224}
5225
48b01e2d
MC
5226static void
5227bnx2_timer(unsigned long data)
5228{
5229 struct bnx2 *bp = (struct bnx2 *) data;
b6016b76 5230
48b01e2d
MC
5231 if (!netif_running(bp->dev))
5232 return;
b6016b76 5233
48b01e2d
MC
5234 if (atomic_read(&bp->intr_sem) != 0)
5235 goto bnx2_restart_timer;
b6016b76 5236
df149d70 5237 bnx2_send_heart_beat(bp);
b6016b76 5238
48b01e2d 5239 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
b6016b76 5240
02537b06
MC
5241 /* workaround occasional corrupted counters */
5242 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
5243 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
5244 BNX2_HC_COMMAND_STATS_NOW);
5245
f8dd064e
MC
5246 if (bp->phy_flags & PHY_SERDES_FLAG) {
5247 if (CHIP_NUM(bp) == CHIP_NUM_5706)
5248 bnx2_5706_serdes_timer(bp);
27a005b8 5249 else
f8dd064e 5250 bnx2_5708_serdes_timer(bp);
b6016b76
MC
5251 }
5252
5253bnx2_restart_timer:
cd339a0e 5254 mod_timer(&bp->timer, jiffies + bp->current_interval);
b6016b76
MC
5255}
5256
8e6a72c4
MC
5257static int
5258bnx2_request_irq(struct bnx2 *bp)
5259{
5260 struct net_device *dev = bp->dev;
6d866ffc
MC
5261 unsigned long flags;
5262 struct bnx2_irq *irq = &bp->irq_tbl[0];
5263 int rc;
8e6a72c4 5264
6d866ffc
MC
5265 if (bp->flags & USING_MSI_FLAG)
5266 flags = 0;
5267 else
5268 flags = IRQF_SHARED;
5269 rc = request_irq(irq->vector, irq->handler, flags, dev->name, dev);
8e6a72c4
MC
5270 return rc;
5271}
5272
5273static void
5274bnx2_free_irq(struct bnx2 *bp)
5275{
5276 struct net_device *dev = bp->dev;
5277
6d866ffc 5278 free_irq(bp->irq_tbl[0].vector, dev);
8e6a72c4 5279 if (bp->flags & USING_MSI_FLAG) {
8e6a72c4
MC
5280 pci_disable_msi(bp->pdev);
5281 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
6d866ffc
MC
5282 }
5283}
5284
5285static void
5286bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5287{
5288 bp->irq_tbl[0].handler = bnx2_interrupt;
5289 strcpy(bp->irq_tbl[0].name, bp->dev->name);
5290
5291 if ((bp->flags & MSI_CAP_FLAG) && !dis_msi) {
5292 if (pci_enable_msi(bp->pdev) == 0) {
5293 bp->flags |= USING_MSI_FLAG;
5294 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5295 bp->flags |= ONE_SHOT_MSI_FLAG;
5296 bp->irq_tbl[0].handler = bnx2_msi_1shot;
5297 } else
5298 bp->irq_tbl[0].handler = bnx2_msi;
5299 }
5300 }
5301
5302 bp->irq_tbl[0].vector = bp->pdev->irq;
8e6a72c4
MC
5303}
5304
b6016b76
MC
/* Called with rtnl_lock */
/* ndo_open handler: bring the interface up.
 *
 * Sequence: power the chip to D0, allocate rings, pick the interrupt
 * mode, enable NAPI, request the IRQ, then initialize the NIC.  On any
 * failure the steps already taken are unwound and the error returned.
 *
 * If MSI is in use, a test interrupt is generated; some chipsets fail
 * to deliver MSI, in which case the driver tears down the interrupt
 * setup, re-initializes the NIC, and falls back to INTx.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	bnx2_setup_int_mode(bp, disable_msi);
	bnx2_napi_enable(bp);
	rc = bnx2_request_irq(bp);

	if (rc) {
		bnx2_napi_disable(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		bnx2_napi_disable(bp);
		bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	/* Start the periodic driver timer (link poll, statistics). */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* dis_msi=1 forces legacy INTx on the retry. */
			bnx2_setup_int_mode(bp, 1);

			/* The chip must be re-initialized after the
			 * interrupt mode change.
			 */
			rc = bnx2_init_nic(bp);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				bnx2_napi_disable(bp);
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
5386
/* Workqueue handler scheduled by bnx2_tx_timeout(): reset and restart
 * the NIC.
 *
 * in_reset_task is set for the duration so bnx2_close() can wait for a
 * reset in flight instead of calling flush_scheduled_work() (which
 * could deadlock on rtnl_lock — see the comment in bnx2_close()).
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	/* The device may have been closed between scheduling and now. */
	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	/* intr_sem = 1 keeps interrupts deferred until netif_start
	 * re-enables them.
	 */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
5404
/* ndo_tx_timeout handler.  Defers the actual reset to the reset_task
 * workqueue so the netif can be shut down gracefully in process
 * context rather than here (softirq context).
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
5413
#ifdef BCM_VLAN
/* Called with rtnl_lock */
/* Register (or, with vlgrp == NULL, unregister) the VLAN group.
 * Traffic is quiesced while the RX mode is reprogrammed so the chip's
 * VLAN stripping setting never disagrees with the stack's view.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
#endif
5429
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
/* Hard-start transmit: map the skb (linear part plus page fragments)
 * into a chain of TX buffer descriptors, set checksum/VLAN/TSO flags,
 * and ring the doorbell registers.
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi;

	/* The queue should have been stopped before the ring could fill;
	 * hitting this path indicates a flow-control bug.
	 */
	if (unlikely(bnx2_tx_avail(bp, bnapi) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	/* TSO setup: encode MSS, TCP option length and (for IPv6) the
	 * TCP header offset into the BD flags for the firmware LSO engine.
	 */
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* IPv6 TSO: TCP header offset relative to the end
			 * of a minimal IPv6 header, in 8-byte units, split
			 * across several BD flag fields.
			 */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			/* IPv4 TSO: headers are rewritten below, so a
			 * cloned header area must be un-shared first.
			 */
			if (skb_header_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			/* Pre-compute per-segment tot_len and the TCP
			 * pseudo-header checksum for the hardware.
			 */
			iph = ip_hdr(skb);
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	/* NOTE(review): pci_map_single()/pci_map_page() results are not
	 * checked for mapping errors anywhere in this function.
	 */
	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	/* First BD: linear part of the skb, flagged as packet start. */
	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	/* One additional BD per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* Mark the last BD in the chain. */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	/* Doorbell: publish the new producer index and byte count. */
	REG_WR16(bp, bp->tx_bidx_addr, prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	mmiowb();

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

	/* Stop the queue if a maximally-fragmented skb no longer fits;
	 * re-check after stopping to close the race with bnx2_tx_int().
	 */
	if (unlikely(bnx2_tx_avail(bp, bnapi) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
5570
/* Called with rtnl_lock */
/* ndo_stop handler: quiesce and power down the interface.
 *
 * Waits for a reset_task in flight, disables interrupts and NAPI,
 * tells the firmware why the driver is unloading (affects wake-on-LAN
 * behavior), then frees IRQ, skbs and ring memory and drops to D3hot.
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	del_timer_sync(&bp->timer);
	/* Pick the firmware reset code: link-down if WoL is unsupported,
	 * otherwise suspend with or without WoL as configured.
	 */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
5603
/* Combine the hi/lo 32-bit halves of a hardware statistics counter
 * into an unsigned long.  GET_NET_STATS picks the 64-bit combine on
 * 64-bit kernels and just the low word on 32-bit kernels.
 *
 * The full expansion is parenthesized so the macro behaves as a single
 * expression in any arithmetic context (the previous form ended in an
 * unparenthesized "+ lo", so e.g. GET_NET_STATS64(x) * 2 would have
 * multiplied only the low word).
 */
#define GET_NET_STATS64(ctr)					\
	((unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	 (unsigned long) (ctr##_lo))

#define GET_NET_STATS32(ctr)					\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS GET_NET_STATS64
#else
#define GET_NET_STATS GET_NET_STATS32
#endif
5616
/* ndo_get_stats handler: translate the DMA'd hardware statistics block
 * into struct net_device_stats.  Returns the (zeroed) stats unchanged
 * if the statistics block has not been allocated yet.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier-sense counter is unreliable on 5706 and 5708 A0
	 * (hardware errata) — report 0 there.
	 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
5692
/* All ethtool functions called with rtnl_lock */

/* ethtool get_settings handler.
 *
 * Builds the supported-modes mask from the PHY type (a remote-PHY
 * capable device can switch between serdes and copper, so both sets
 * are advertised).  Volatile link state (port, advertising, autoneg,
 * speed, duplex) is read under phy_lock for a consistent snapshot;
 * speed/duplex are only valid while the carrier is up, otherwise -1.
 */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;

	cmd->supported = SUPPORTED_Autoneg;
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
			cmd->supported |= SUPPORTED_2500baseX_Full;

	}
	if (support_copper) {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

	}

	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	}
	else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	}
	else {
		/* No link: speed/duplex are unknown. */
		cmd->speed = -1;
		cmd->duplex = -1;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
6aa20a22 5753
b6016b76
MC
/* ethtool set_settings handler.
 *
 * Validates the requested port/autoneg/speed/duplex combination against
 * the PHY's capabilities, then commits the new settings and reprograms
 * the PHY — all under phy_lock.  Returns -EINVAL on any invalid
 * combination; the bp fields are only updated once validation passed.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Changing the port type is only possible with a remote PHY. */
	if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 modes are copper-only. */
			if (cmd->port == PORT_FIBRE)
				goto err_out_unlock;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
			    (cmd->port == PORT_TP))
				goto err_out_unlock;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full)
			advertising = cmd->advertising;
		else if (cmd->advertising == ADVERTISED_1000baseT_Half)
			goto err_out_unlock;
		else {
			/* Anything else: advertise everything the port
			 * type supports.
			 */
			if (cmd->port == PORT_FIBRE)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			else
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed/duplex. */
		if (cmd->port == PORT_FIBRE) {
			/* Fibre only supports 1G/2.5G full duplex. */
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				goto err_out_unlock;
		}
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* Validation passed: commit and reprogram the PHY. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
5836
/* ethtool get_drvinfo handler: report driver name/version, PCI bus
 * address and the bootcode/firmware version string cached at probe
 * time.  (strcpy assumes each string fits the fixed ethtool_drvinfo
 * fields — true for these compile-time/probe-time strings.)
 */
static void
bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct bnx2 *bp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->bus_info, pci_name(bp->pdev));
	strcpy(info->fw_version, bp->fw_version);
}
5847
244ac4f4
MC
/* Size of the ethtool register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN		(32 * 1024)

/* ethtool get_regs_len handler: fixed-size dump. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
5855
5856static void
5857bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5858{
5859 u32 *p = _p, i, offset;
5860 u8 *orig_p = _p;
5861 struct bnx2 *bp = netdev_priv(dev);
5862 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5863 0x0800, 0x0880, 0x0c00, 0x0c10,
5864 0x0c30, 0x0d08, 0x1000, 0x101c,
5865 0x1040, 0x1048, 0x1080, 0x10a4,
5866 0x1400, 0x1490, 0x1498, 0x14f0,
5867 0x1500, 0x155c, 0x1580, 0x15dc,
5868 0x1600, 0x1658, 0x1680, 0x16d8,
5869 0x1800, 0x1820, 0x1840, 0x1854,
5870 0x1880, 0x1894, 0x1900, 0x1984,
5871 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5872 0x1c80, 0x1c94, 0x1d00, 0x1d84,
5873 0x2000, 0x2030, 0x23c0, 0x2400,
5874 0x2800, 0x2820, 0x2830, 0x2850,
5875 0x2b40, 0x2c10, 0x2fc0, 0x3058,
5876 0x3c00, 0x3c94, 0x4000, 0x4010,
5877 0x4080, 0x4090, 0x43c0, 0x4458,
5878 0x4c00, 0x4c18, 0x4c40, 0x4c54,
5879 0x4fc0, 0x5010, 0x53c0, 0x5444,
5880 0x5c00, 0x5c18, 0x5c80, 0x5c90,
5881 0x5fc0, 0x6000, 0x6400, 0x6428,
5882 0x6800, 0x6848, 0x684c, 0x6860,
5883 0x6888, 0x6910, 0x8000 };
5884
5885 regs->version = 0;
5886
5887 memset(p, 0, BNX2_REGDUMP_LEN);
5888
5889 if (!netif_running(bp->dev))
5890 return;
5891
5892 i = 0;
5893 offset = reg_boundaries[0];
5894 p += offset;
5895 while (offset < BNX2_REGDUMP_LEN) {
5896 *p++ = REG_RD(bp, offset);
5897 offset += 4;
5898 if (offset == reg_boundaries[i + 1]) {
5899 offset = reg_boundaries[i + 2];
5900 p = (u32 *) (orig_p + offset);
5901 i += 2;
5902 }
5903 }
5904}
5905
b6016b76
MC
5906static void
5907bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5908{
972ec0d4 5909 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5910
5911 if (bp->flags & NO_WOL_FLAG) {
5912 wol->supported = 0;
5913 wol->wolopts = 0;
5914 }
5915 else {
5916 wol->supported = WAKE_MAGIC;
5917 if (bp->wol)
5918 wol->wolopts = WAKE_MAGIC;
5919 else
5920 wol->wolopts = 0;
5921 }
5922 memset(&wol->sopass, 0, sizeof(wol->sopass));
5923}
5924
5925static int
5926bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5927{
972ec0d4 5928 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5929
5930 if (wol->wolopts & ~WAKE_MAGIC)
5931 return -EINVAL;
5932
5933 if (wol->wolopts & WAKE_MAGIC) {
5934 if (bp->flags & NO_WOL_FLAG)
5935 return -EINVAL;
5936
5937 bp->wol = 1;
5938 }
5939 else {
5940 bp->wol = 0;
5941 }
5942 return 0;
5943}
5944
/* ethtool nway_reset handler: restart autonegotiation.
 *
 * Only valid when autoneg is enabled.  Remote-PHY devices delegate to
 * the firmware.  For serdes PHYs, loopback mode is entered briefly so
 * the peer sees a link-down event; phy_lock is dropped around the
 * msleep() since sleeping under a BH spinlock is not allowed.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
		int rc;

		/* Firmware-managed PHY: let the firmware renegotiate. */
		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the driver timer to supervise the serdes autoneg. */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Leave loopback and restart autonegotiation. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
5987
5988static int
5989bnx2_get_eeprom_len(struct net_device *dev)
5990{
972ec0d4 5991 struct bnx2 *bp = netdev_priv(dev);
b6016b76 5992
1122db71 5993 if (bp->flash_info == NULL)
b6016b76
MC
5994 return 0;
5995
1122db71 5996 return (int) bp->flash_size;
b6016b76
MC
5997}
5998
5999static int
6000bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6001 u8 *eebuf)
6002{
972ec0d4 6003 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6004 int rc;
6005
1064e944 6006 /* parameters already validated in ethtool_get_eeprom */
b6016b76
MC
6007
6008 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6009
6010 return rc;
6011}
6012
6013static int
6014bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6015 u8 *eebuf)
6016{
972ec0d4 6017 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6018 int rc;
6019
1064e944 6020 /* parameters already validated in ethtool_set_eeprom */
b6016b76
MC
6021
6022 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6023
6024 return rc;
6025}
6026
6027static int
6028bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6029{
972ec0d4 6030 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6031
6032 memset(coal, 0, sizeof(struct ethtool_coalesce));
6033
6034 coal->rx_coalesce_usecs = bp->rx_ticks;
6035 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6036 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6037 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6038
6039 coal->tx_coalesce_usecs = bp->tx_ticks;
6040 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6041 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6042 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6043
6044 coal->stats_block_coalesce_usecs = bp->stats_ticks;
6045
6046 return 0;
6047}
6048
6049static int
6050bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6051{
972ec0d4 6052 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6053
6054 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6055 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6056
6aa20a22 6057 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
b6016b76
MC
6058 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6059
6060 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6061 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6062
6063 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6064 if (bp->rx_quick_cons_trip_int > 0xff)
6065 bp->rx_quick_cons_trip_int = 0xff;
6066
6067 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6068 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6069
6070 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6071 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6072
6073 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6074 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6075
6076 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6077 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
6078 0xff;
6079
6080 bp->stats_ticks = coal->stats_block_coalesce_usecs;
02537b06
MC
6081 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6082 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6083 bp->stats_ticks = USEC_PER_SEC;
6084 }
7ea6920e
MC
6085 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6086 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6087 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
b6016b76
MC
6088
6089 if (netif_running(bp->dev)) {
6090 bnx2_netif_stop(bp);
6091 bnx2_init_nic(bp);
6092 bnx2_netif_start(bp);
6093 }
6094
6095 return 0;
6096}
6097
6098static void
6099bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6100{
972ec0d4 6101 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6102
13daffa2 6103 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
b6016b76 6104 ering->rx_mini_max_pending = 0;
47bf4246 6105 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
b6016b76
MC
6106
6107 ering->rx_pending = bp->rx_ring_size;
6108 ering->rx_mini_pending = 0;
47bf4246 6109 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
b6016b76
MC
6110
6111 ering->tx_max_pending = MAX_TX_DESC_CNT;
6112 ering->tx_pending = bp->tx_ring_size;
6113}
6114
/* Resize the RX and TX rings to @rx and @tx descriptors.
 *
 * If the NIC is running it is quiesced and its buffers/ring memory
 * freed before the new sizes are recorded, then memory is reallocated
 * and the NIC restarted.
 *
 * NOTE(review): if bnx2_alloc_mem() fails here, the function returns
 * with the device stopped and without ring memory — the caller gets
 * the error but the interface is left non-functional until closed.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}
	return 0;
}
6139
5d5d0015
MC
6140static int
6141bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6142{
6143 struct bnx2 *bp = netdev_priv(dev);
6144 int rc;
6145
6146 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6147 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6148 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6149
6150 return -EINVAL;
6151 }
6152 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6153 return rc;
6154}
6155
b6016b76
MC
6156static void
6157bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6158{
972ec0d4 6159 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6160
6161 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6162 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6163 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6164}
6165
6166static int
6167bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6168{
972ec0d4 6169 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6170
6171 bp->req_flow_ctrl = 0;
6172 if (epause->rx_pause)
6173 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6174 if (epause->tx_pause)
6175 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6176
6177 if (epause->autoneg) {
6178 bp->autoneg |= AUTONEG_FLOW_CTRL;
6179 }
6180 else {
6181 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6182 }
6183
c770a65c 6184 spin_lock_bh(&bp->phy_lock);
b6016b76 6185
0d8a6571 6186 bnx2_setup_phy(bp, bp->phy_port);
b6016b76 6187
c770a65c 6188 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
6189
6190 return 0;
6191}
6192
/* ethtool get_rx_csum handler: report whether RX checksum offload is
 * enabled.
 */
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}
6200
/* ethtool set_rx_csum handler: enable/disable RX checksum offload.
 * Only the software flag is updated; the RX path consults it per
 * packet, so no hardware reprogramming is needed.
 */
static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}
6209
b11d6213
MC
6210static int
6211bnx2_set_tso(struct net_device *dev, u32 data)
6212{
4666f87a
MC
6213 struct bnx2 *bp = netdev_priv(dev);
6214
6215 if (data) {
b11d6213 6216 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
4666f87a
MC
6217 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6218 dev->features |= NETIF_F_TSO6;
6219 } else
6220 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6221 NETIF_F_TSO_ECN);
b11d6213
MC
6222 return 0;
6223}
6224
/* Ethtool statistics tables.  The three arrays below are parallel:
 * entry i of bnx2_stats_str_arr names the counter whose statistics-
 * block offset is bnx2_stats_offset_arr[i] and whose width (8 = 64-bit
 * hi/lo pair, 4 = 32-bit, 0 = skipped on that chip) is given by the
 * per-chip length arrays.  Keep all of them index-aligned when adding
 * or removing a counter, and update BNX2_NUM_STATS.
 */
#define BNX2_NUM_STATS 46

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};

/* Offset of a statistics_block field in 32-bit words. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};

/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

/* 5708: only stat_IfHCInBadOctets is skipped. */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
MC
/* Self-test names, in the order bnx2_self_test() fills its result
 * buffer (indices 0-2 run offline only, 3-5 always run).
 */
#define BNX2_NUM_TESTS 6

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
6360
6361static int
b9f2c044 6362bnx2_get_sset_count(struct net_device *dev, int sset)
b6016b76 6363{
b9f2c044
JG
6364 switch (sset) {
6365 case ETH_SS_TEST:
6366 return BNX2_NUM_TESTS;
6367 case ETH_SS_STATS:
6368 return BNX2_NUM_STATS;
6369 default:
6370 return -EOPNOTSUPP;
6371 }
b6016b76
MC
6372}
6373
6374static void
6375bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6376{
972ec0d4 6377 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6378
6379 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6380 if (etest->flags & ETH_TEST_FL_OFFLINE) {
80be4434
MC
6381 int i;
6382
b6016b76
MC
6383 bnx2_netif_stop(bp);
6384 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6385 bnx2_free_skbs(bp);
6386
6387 if (bnx2_test_registers(bp) != 0) {
6388 buf[0] = 1;
6389 etest->flags |= ETH_TEST_FL_FAILED;
6390 }
6391 if (bnx2_test_memory(bp) != 0) {
6392 buf[1] = 1;
6393 etest->flags |= ETH_TEST_FL_FAILED;
6394 }
bc5a0690 6395 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
b6016b76 6396 etest->flags |= ETH_TEST_FL_FAILED;
b6016b76
MC
6397
6398 if (!netif_running(bp->dev)) {
6399 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6400 }
6401 else {
6402 bnx2_init_nic(bp);
6403 bnx2_netif_start(bp);
6404 }
6405
6406 /* wait for link up */
80be4434
MC
6407 for (i = 0; i < 7; i++) {
6408 if (bp->link_up)
6409 break;
6410 msleep_interruptible(1000);
6411 }
b6016b76
MC
6412 }
6413
6414 if (bnx2_test_nvram(bp) != 0) {
6415 buf[3] = 1;
6416 etest->flags |= ETH_TEST_FL_FAILED;
6417 }
6418 if (bnx2_test_intr(bp) != 0) {
6419 buf[4] = 1;
6420 etest->flags |= ETH_TEST_FL_FAILED;
6421 }
6422
6423 if (bnx2_test_link(bp) != 0) {
6424 buf[5] = 1;
6425 etest->flags |= ETH_TEST_FL_FAILED;
6426
6427 }
6428}
6429
6430static void
6431bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6432{
6433 switch (stringset) {
6434 case ETH_SS_STATS:
6435 memcpy(buf, bnx2_stats_str_arr,
6436 sizeof(bnx2_stats_str_arr));
6437 break;
6438 case ETH_SS_TEST:
6439 memcpy(buf, bnx2_tests_str_arr,
6440 sizeof(bnx2_tests_str_arr));
6441 break;
6442 }
6443}
6444
b6016b76
MC
6445static void
6446bnx2_get_ethtool_stats(struct net_device *dev,
6447 struct ethtool_stats *stats, u64 *buf)
6448{
972ec0d4 6449 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6450 int i;
6451 u32 *hw_stats = (u32 *) bp->stats_blk;
14ab9b86 6452 u8 *stats_len_arr = NULL;
b6016b76
MC
6453
6454 if (hw_stats == NULL) {
6455 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6456 return;
6457 }
6458
5b0c76ad
MC
6459 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6460 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6461 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6462 (CHIP_ID(bp) == CHIP_ID_5708_A0))
b6016b76 6463 stats_len_arr = bnx2_5706_stats_len_arr;
5b0c76ad
MC
6464 else
6465 stats_len_arr = bnx2_5708_stats_len_arr;
b6016b76
MC
6466
6467 for (i = 0; i < BNX2_NUM_STATS; i++) {
6468 if (stats_len_arr[i] == 0) {
6469 /* skip this counter */
6470 buf[i] = 0;
6471 continue;
6472 }
6473 if (stats_len_arr[i] == 4) {
6474 /* 4-byte counter */
6475 buf[i] = (u64)
6476 *(hw_stats + bnx2_stats_offset_arr[i]);
6477 continue;
6478 }
6479 /* 8-byte counter */
6480 buf[i] = (((u64) *(hw_stats +
6481 bnx2_stats_offset_arr[i])) << 32) +
6482 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6483 }
6484}
6485
6486static int
6487bnx2_phys_id(struct net_device *dev, u32 data)
6488{
972ec0d4 6489 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6490 int i;
6491 u32 save;
6492
6493 if (data == 0)
6494 data = 2;
6495
6496 save = REG_RD(bp, BNX2_MISC_CFG);
6497 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6498
6499 for (i = 0; i < (data * 2); i++) {
6500 if ((i % 2) == 0) {
6501 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6502 }
6503 else {
6504 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6505 BNX2_EMAC_LED_1000MB_OVERRIDE |
6506 BNX2_EMAC_LED_100MB_OVERRIDE |
6507 BNX2_EMAC_LED_10MB_OVERRIDE |
6508 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6509 BNX2_EMAC_LED_TRAFFIC);
6510 }
6511 msleep_interruptible(500);
6512 if (signal_pending(current))
6513 break;
6514 }
6515 REG_WR(bp, BNX2_EMAC_LED, 0);
6516 REG_WR(bp, BNX2_MISC_CFG, save);
6517 return 0;
6518}
6519
4666f87a
MC
6520static int
6521bnx2_set_tx_csum(struct net_device *dev, u32 data)
6522{
6523 struct bnx2 *bp = netdev_priv(dev);
6524
6525 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6460d948 6526 return (ethtool_op_set_tx_ipv6_csum(dev, data));
4666f87a
MC
6527 else
6528 return (ethtool_op_set_tx_csum(dev, data));
6529}
6530
7282d491 6531static const struct ethtool_ops bnx2_ethtool_ops = {
b6016b76
MC
6532 .get_settings = bnx2_get_settings,
6533 .set_settings = bnx2_set_settings,
6534 .get_drvinfo = bnx2_get_drvinfo,
244ac4f4
MC
6535 .get_regs_len = bnx2_get_regs_len,
6536 .get_regs = bnx2_get_regs,
b6016b76
MC
6537 .get_wol = bnx2_get_wol,
6538 .set_wol = bnx2_set_wol,
6539 .nway_reset = bnx2_nway_reset,
6540 .get_link = ethtool_op_get_link,
6541 .get_eeprom_len = bnx2_get_eeprom_len,
6542 .get_eeprom = bnx2_get_eeprom,
6543 .set_eeprom = bnx2_set_eeprom,
6544 .get_coalesce = bnx2_get_coalesce,
6545 .set_coalesce = bnx2_set_coalesce,
6546 .get_ringparam = bnx2_get_ringparam,
6547 .set_ringparam = bnx2_set_ringparam,
6548 .get_pauseparam = bnx2_get_pauseparam,
6549 .set_pauseparam = bnx2_set_pauseparam,
6550 .get_rx_csum = bnx2_get_rx_csum,
6551 .set_rx_csum = bnx2_set_rx_csum,
4666f87a 6552 .set_tx_csum = bnx2_set_tx_csum,
b6016b76 6553 .set_sg = ethtool_op_set_sg,
b11d6213 6554 .set_tso = bnx2_set_tso,
b6016b76
MC
6555 .self_test = bnx2_self_test,
6556 .get_strings = bnx2_get_strings,
6557 .phys_id = bnx2_phys_id,
b6016b76 6558 .get_ethtool_stats = bnx2_get_ethtool_stats,
b9f2c044 6559 .get_sset_count = bnx2_get_sset_count,
b6016b76
MC
6560};
6561
6562/* Called with rtnl_lock */
6563static int
6564bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6565{
14ab9b86 6566 struct mii_ioctl_data *data = if_mii(ifr);
972ec0d4 6567 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6568 int err;
6569
6570 switch(cmd) {
6571 case SIOCGMIIPHY:
6572 data->phy_id = bp->phy_addr;
6573
6574 /* fallthru */
6575 case SIOCGMIIREG: {
6576 u32 mii_regval;
6577
7b6b8347
MC
6578 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6579 return -EOPNOTSUPP;
6580
dad3e452
MC
6581 if (!netif_running(dev))
6582 return -EAGAIN;
6583
c770a65c 6584 spin_lock_bh(&bp->phy_lock);
b6016b76 6585 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
c770a65c 6586 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
6587
6588 data->val_out = mii_regval;
6589
6590 return err;
6591 }
6592
6593 case SIOCSMIIREG:
6594 if (!capable(CAP_NET_ADMIN))
6595 return -EPERM;
6596
7b6b8347
MC
6597 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6598 return -EOPNOTSUPP;
6599
dad3e452
MC
6600 if (!netif_running(dev))
6601 return -EAGAIN;
6602
c770a65c 6603 spin_lock_bh(&bp->phy_lock);
b6016b76 6604 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
c770a65c 6605 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
6606
6607 return err;
6608
6609 default:
6610 /* do nothing */
6611 break;
6612 }
6613 return -EOPNOTSUPP;
6614}
6615
6616/* Called with rtnl_lock */
6617static int
6618bnx2_change_mac_addr(struct net_device *dev, void *p)
6619{
6620 struct sockaddr *addr = p;
972ec0d4 6621 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6622
73eef4cd
MC
6623 if (!is_valid_ether_addr(addr->sa_data))
6624 return -EINVAL;
6625
b6016b76
MC
6626 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6627 if (netif_running(dev))
6628 bnx2_set_mac_addr(bp);
6629
6630 return 0;
6631}
6632
6633/* Called with rtnl_lock */
6634static int
6635bnx2_change_mtu(struct net_device *dev, int new_mtu)
6636{
972ec0d4 6637 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6638
6639 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6640 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6641 return -EINVAL;
6642
6643 dev->mtu = new_mtu;
5d5d0015 6644 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
b6016b76
MC
6645}
6646
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook: service the device once with its IRQ line masked. */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int irq = bp->pdev->irq;

	disable_irq(irq);
	bnx2_interrupt(irq, dev);
	enable_irq(irq);
}
#endif
6658
253c8b75
MC
6659static void __devinit
6660bnx2_get_5709_media(struct bnx2 *bp)
6661{
6662 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6663 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6664 u32 strap;
6665
6666 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6667 return;
6668 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6669 bp->phy_flags |= PHY_SERDES_FLAG;
6670 return;
6671 }
6672
6673 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6674 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6675 else
6676 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6677
6678 if (PCI_FUNC(bp->pdev->devfn) == 0) {
6679 switch (strap) {
6680 case 0x4:
6681 case 0x5:
6682 case 0x6:
6683 bp->phy_flags |= PHY_SERDES_FLAG;
6684 return;
6685 }
6686 } else {
6687 switch (strap) {
6688 case 0x1:
6689 case 0x2:
6690 case 0x4:
6691 bp->phy_flags |= PHY_SERDES_FLAG;
6692 return;
6693 }
6694 }
6695}
6696
883e5151
MC
6697static void __devinit
6698bnx2_get_pci_speed(struct bnx2 *bp)
6699{
6700 u32 reg;
6701
6702 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6703 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6704 u32 clkreg;
6705
6706 bp->flags |= PCIX_FLAG;
6707
6708 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
6709
6710 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6711 switch (clkreg) {
6712 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6713 bp->bus_speed_mhz = 133;
6714 break;
6715
6716 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6717 bp->bus_speed_mhz = 100;
6718 break;
6719
6720 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6721 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6722 bp->bus_speed_mhz = 66;
6723 break;
6724
6725 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6726 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6727 bp->bus_speed_mhz = 50;
6728 break;
6729
6730 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6731 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6732 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6733 bp->bus_speed_mhz = 33;
6734 break;
6735 }
6736 }
6737 else {
6738 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6739 bp->bus_speed_mhz = 66;
6740 else
6741 bp->bus_speed_mhz = 33;
6742 }
6743
6744 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
6745 bp->flags |= PCI_32BIT_FLAG;
6746
6747}
6748
b6016b76
MC
6749static int __devinit
6750bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6751{
6752 struct bnx2 *bp;
6753 unsigned long mem_len;
58fc2ea4 6754 int rc, i, j;
b6016b76 6755 u32 reg;
40453c83 6756 u64 dma_mask, persist_dma_mask;
b6016b76 6757
b6016b76 6758 SET_NETDEV_DEV(dev, &pdev->dev);
972ec0d4 6759 bp = netdev_priv(dev);
b6016b76
MC
6760
6761 bp->flags = 0;
6762 bp->phy_flags = 0;
6763
6764 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6765 rc = pci_enable_device(pdev);
6766 if (rc) {
898eb71c 6767 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
b6016b76
MC
6768 goto err_out;
6769 }
6770
6771 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9b91cf9d 6772 dev_err(&pdev->dev,
2e8a538d 6773 "Cannot find PCI device base address, aborting.\n");
b6016b76
MC
6774 rc = -ENODEV;
6775 goto err_out_disable;
6776 }
6777
6778 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6779 if (rc) {
9b91cf9d 6780 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
b6016b76
MC
6781 goto err_out_disable;
6782 }
6783
6784 pci_set_master(pdev);
6785
6786 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6787 if (bp->pm_cap == 0) {
9b91cf9d 6788 dev_err(&pdev->dev,
2e8a538d 6789 "Cannot find power management capability, aborting.\n");
b6016b76
MC
6790 rc = -EIO;
6791 goto err_out_release;
6792 }
6793
b6016b76
MC
6794 bp->dev = dev;
6795 bp->pdev = pdev;
6796
6797 spin_lock_init(&bp->phy_lock);
1b8227c4 6798 spin_lock_init(&bp->indirect_lock);
c4028958 6799 INIT_WORK(&bp->reset_task, bnx2_reset_task);
b6016b76
MC
6800
6801 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
59b47d8a 6802 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
b6016b76
MC
6803 dev->mem_end = dev->mem_start + mem_len;
6804 dev->irq = pdev->irq;
6805
6806 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6807
6808 if (!bp->regview) {
9b91cf9d 6809 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
b6016b76
MC
6810 rc = -ENOMEM;
6811 goto err_out_release;
6812 }
6813
6814 /* Configure byte swap and enable write to the reg_window registers.
6815 * Rely on CPU to do target byte swapping on big endian systems
6816 * The chip's target access swapping will not swap all accesses
6817 */
6818 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6819 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6820 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6821
829ca9a3 6822 bnx2_set_power_state(bp, PCI_D0);
b6016b76
MC
6823
6824 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6825
883e5151
MC
6826 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6827 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6828 dev_err(&pdev->dev,
6829 "Cannot find PCIE capability, aborting.\n");
6830 rc = -EIO;
6831 goto err_out_unmap;
6832 }
6833 bp->flags |= PCIE_FLAG;
6834 } else {
59b47d8a
MC
6835 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6836 if (bp->pcix_cap == 0) {
6837 dev_err(&pdev->dev,
6838 "Cannot find PCIX capability, aborting.\n");
6839 rc = -EIO;
6840 goto err_out_unmap;
6841 }
6842 }
6843
8e6a72c4
MC
6844 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6845 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6846 bp->flags |= MSI_CAP_FLAG;
6847 }
6848
40453c83
MC
6849 /* 5708 cannot support DMA addresses > 40-bit. */
6850 if (CHIP_NUM(bp) == CHIP_NUM_5708)
6851 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6852 else
6853 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6854
6855 /* Configure DMA attributes. */
6856 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6857 dev->features |= NETIF_F_HIGHDMA;
6858 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6859 if (rc) {
6860 dev_err(&pdev->dev,
6861 "pci_set_consistent_dma_mask failed, aborting.\n");
6862 goto err_out_unmap;
6863 }
6864 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6865 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6866 goto err_out_unmap;
6867 }
6868
883e5151
MC
6869 if (!(bp->flags & PCIE_FLAG))
6870 bnx2_get_pci_speed(bp);
b6016b76
MC
6871
6872 /* 5706A0 may falsely detect SERR and PERR. */
6873 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6874 reg = REG_RD(bp, PCI_COMMAND);
6875 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6876 REG_WR(bp, PCI_COMMAND, reg);
6877 }
6878 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6879 !(bp->flags & PCIX_FLAG)) {
6880
9b91cf9d 6881 dev_err(&pdev->dev,
2e8a538d 6882 "5706 A1 can only be used in a PCIX bus, aborting.\n");
b6016b76
MC
6883 goto err_out_unmap;
6884 }
6885
6886 bnx2_init_nvram(bp);
6887
e3648b3d
MC
6888 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6889
6890 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
24cb230b
MC
6891 BNX2_SHM_HDR_SIGNATURE_SIG) {
6892 u32 off = PCI_FUNC(pdev->devfn) << 2;
6893
6894 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6895 } else
e3648b3d
MC
6896 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6897
b6016b76
MC
6898 /* Get the permanent MAC address. First we need to make sure the
6899 * firmware is actually running.
6900 */
e3648b3d 6901 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
b6016b76
MC
6902
6903 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6904 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
9b91cf9d 6905 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
b6016b76
MC
6906 rc = -ENODEV;
6907 goto err_out_unmap;
6908 }
6909
58fc2ea4
MC
6910 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
6911 for (i = 0, j = 0; i < 3; i++) {
6912 u8 num, k, skip0;
6913
6914 num = (u8) (reg >> (24 - (i * 8)));
6915 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
6916 if (num >= k || !skip0 || k == 1) {
6917 bp->fw_version[j++] = (num / k) + '0';
6918 skip0 = 0;
6919 }
6920 }
6921 if (i != 2)
6922 bp->fw_version[j++] = '.';
6923 }
846f5c62
MC
6924 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE);
6925 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
6926 bp->wol = 1;
6927
6928 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
c2d3db8c
MC
6929 bp->flags |= ASF_ENABLE_FLAG;
6930
6931 for (i = 0; i < 30; i++) {
6932 reg = REG_RD_IND(bp, bp->shmem_base +
6933 BNX2_BC_STATE_CONDITION);
6934 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
6935 break;
6936 msleep(10);
6937 }
6938 }
58fc2ea4
MC
6939 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
6940 reg &= BNX2_CONDITION_MFW_RUN_MASK;
6941 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
6942 reg != BNX2_CONDITION_MFW_RUN_NONE) {
6943 int i;
6944 u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR);
6945
6946 bp->fw_version[j++] = ' ';
6947 for (i = 0; i < 3; i++) {
6948 reg = REG_RD_IND(bp, addr + i * 4);
6949 reg = swab32(reg);
6950 memcpy(&bp->fw_version[j], &reg, 4);
6951 j += 4;
6952 }
6953 }
b6016b76 6954
e3648b3d 6955 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
b6016b76
MC
6956 bp->mac_addr[0] = (u8) (reg >> 8);
6957 bp->mac_addr[1] = (u8) reg;
6958
e3648b3d 6959 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
b6016b76
MC
6960 bp->mac_addr[2] = (u8) (reg >> 24);
6961 bp->mac_addr[3] = (u8) (reg >> 16);
6962 bp->mac_addr[4] = (u8) (reg >> 8);
6963 bp->mac_addr[5] = (u8) reg;
6964
5d5d0015
MC
6965 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6966
b6016b76 6967 bp->tx_ring_size = MAX_TX_DESC_CNT;
932f3772 6968 bnx2_set_rx_ring_size(bp, 255);
b6016b76
MC
6969
6970 bp->rx_csum = 1;
6971
b6016b76
MC
6972 bp->tx_quick_cons_trip_int = 20;
6973 bp->tx_quick_cons_trip = 20;
6974 bp->tx_ticks_int = 80;
6975 bp->tx_ticks = 80;
6aa20a22 6976
b6016b76
MC
6977 bp->rx_quick_cons_trip_int = 6;
6978 bp->rx_quick_cons_trip = 6;
6979 bp->rx_ticks_int = 18;
6980 bp->rx_ticks = 18;
6981
7ea6920e 6982 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
b6016b76
MC
6983
6984 bp->timer_interval = HZ;
cd339a0e 6985 bp->current_interval = HZ;
b6016b76 6986
5b0c76ad
MC
6987 bp->phy_addr = 1;
6988
b6016b76 6989 /* Disable WOL support if we are running on a SERDES chip. */
253c8b75
MC
6990 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6991 bnx2_get_5709_media(bp);
6992 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
b6016b76 6993 bp->phy_flags |= PHY_SERDES_FLAG;
bac0dff6 6994
0d8a6571 6995 bp->phy_port = PORT_TP;
bac0dff6 6996 if (bp->phy_flags & PHY_SERDES_FLAG) {
0d8a6571 6997 bp->phy_port = PORT_FIBRE;
846f5c62
MC
6998 reg = REG_RD_IND(bp, bp->shmem_base +
6999 BNX2_SHARED_HW_CFG_CONFIG);
7000 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
7001 bp->flags |= NO_WOL_FLAG;
7002 bp->wol = 0;
7003 }
bac0dff6 7004 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
5b0c76ad 7005 bp->phy_addr = 2;
5b0c76ad
MC
7006 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7007 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
7008 }
0d8a6571
MC
7009 bnx2_init_remote_phy(bp);
7010
261dd5ca
MC
7011 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7012 CHIP_NUM(bp) == CHIP_NUM_5708)
7013 bp->phy_flags |= PHY_CRC_FIX_FLAG;
fb0c18bd
MC
7014 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7015 (CHIP_REV(bp) == CHIP_REV_Ax ||
7016 CHIP_REV(bp) == CHIP_REV_Bx))
b659f44e 7017 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
b6016b76 7018
16088272
MC
7019 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7020 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
846f5c62 7021 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
dda1e390 7022 bp->flags |= NO_WOL_FLAG;
846f5c62
MC
7023 bp->wol = 0;
7024 }
dda1e390 7025
b6016b76
MC
7026 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7027 bp->tx_quick_cons_trip_int =
7028 bp->tx_quick_cons_trip;
7029 bp->tx_ticks_int = bp->tx_ticks;
7030 bp->rx_quick_cons_trip_int =
7031 bp->rx_quick_cons_trip;
7032 bp->rx_ticks_int = bp->rx_ticks;
7033 bp->comp_prod_trip_int = bp->comp_prod_trip;
7034 bp->com_ticks_int = bp->com_ticks;
7035 bp->cmd_ticks_int = bp->cmd_ticks;
7036 }
7037
f9317a40
MC
7038 /* Disable MSI on 5706 if AMD 8132 bridge is found.
7039 *
7040 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
7041 * with byte enables disabled on the unused 32-bit word. This is legal
7042 * but causes problems on the AMD 8132 which will eventually stop
7043 * responding after a while.
7044 *
7045 * AMD believes this incompatibility is unique to the 5706, and
88187dfa 7046 * prefers to locally disable MSI rather than globally disabling it.
f9317a40
MC
7047 */
7048 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7049 struct pci_dev *amd_8132 = NULL;
7050
7051 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7052 PCI_DEVICE_ID_AMD_8132_BRIDGE,
7053 amd_8132))) {
f9317a40 7054
44c10138
AK
7055 if (amd_8132->revision >= 0x10 &&
7056 amd_8132->revision <= 0x13) {
f9317a40
MC
7057 disable_msi = 1;
7058 pci_dev_put(amd_8132);
7059 break;
7060 }
7061 }
7062 }
7063
deaf391b 7064 bnx2_set_default_link(bp);
b6016b76
MC
7065 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7066
cd339a0e
MC
7067 init_timer(&bp->timer);
7068 bp->timer.expires = RUN_AT(bp->timer_interval);
7069 bp->timer.data = (unsigned long) bp;
7070 bp->timer.function = bnx2_timer;
7071
b6016b76
MC
7072 return 0;
7073
7074err_out_unmap:
7075 if (bp->regview) {
7076 iounmap(bp->regview);
73eef4cd 7077 bp->regview = NULL;
b6016b76
MC
7078 }
7079
7080err_out_release:
7081 pci_release_regions(pdev);
7082
7083err_out_disable:
7084 pci_disable_device(pdev);
7085 pci_set_drvdata(pdev, NULL);
7086
7087err_out:
7088 return rc;
7089}
7090
883e5151
MC
7091static char * __devinit
7092bnx2_bus_string(struct bnx2 *bp, char *str)
7093{
7094 char *s = str;
7095
7096 if (bp->flags & PCIE_FLAG) {
7097 s += sprintf(s, "PCI Express");
7098 } else {
7099 s += sprintf(s, "PCI");
7100 if (bp->flags & PCIX_FLAG)
7101 s += sprintf(s, "-X");
7102 if (bp->flags & PCI_32BIT_FLAG)
7103 s += sprintf(s, " 32-bit");
7104 else
7105 s += sprintf(s, " 64-bit");
7106 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7107 }
7108 return str;
7109}
7110
35efa7c1
MC
7111static int __devinit
7112bnx2_init_napi(struct bnx2 *bp)
7113{
7114 struct bnx2_napi *bnapi = &bp->bnx2_napi;
7115
7116 bnapi->bp = bp;
7117 netif_napi_add(bp->dev, &bnapi->napi, bnx2_poll, 64);
7118}
7119
b6016b76
MC
7120static int __devinit
7121bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7122{
7123 static int version_printed = 0;
7124 struct net_device *dev = NULL;
7125 struct bnx2 *bp;
0795af57 7126 int rc;
883e5151 7127 char str[40];
0795af57 7128 DECLARE_MAC_BUF(mac);
b6016b76
MC
7129
7130 if (version_printed++ == 0)
7131 printk(KERN_INFO "%s", version);
7132
7133 /* dev zeroed in init_etherdev */
7134 dev = alloc_etherdev(sizeof(*bp));
7135
7136 if (!dev)
7137 return -ENOMEM;
7138
7139 rc = bnx2_init_board(pdev, dev);
7140 if (rc < 0) {
7141 free_netdev(dev);
7142 return rc;
7143 }
7144
7145 dev->open = bnx2_open;
7146 dev->hard_start_xmit = bnx2_start_xmit;
7147 dev->stop = bnx2_close;
7148 dev->get_stats = bnx2_get_stats;
7149 dev->set_multicast_list = bnx2_set_rx_mode;
7150 dev->do_ioctl = bnx2_ioctl;
7151 dev->set_mac_address = bnx2_change_mac_addr;
7152 dev->change_mtu = bnx2_change_mtu;
7153 dev->tx_timeout = bnx2_tx_timeout;
7154 dev->watchdog_timeo = TX_TIMEOUT;
7155#ifdef BCM_VLAN
7156 dev->vlan_rx_register = bnx2_vlan_rx_register;
b6016b76 7157#endif
b6016b76 7158 dev->ethtool_ops = &bnx2_ethtool_ops;
b6016b76 7159
972ec0d4 7160 bp = netdev_priv(dev);
35efa7c1 7161 bnx2_init_napi(bp);
b6016b76
MC
7162
7163#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7164 dev->poll_controller = poll_bnx2;
7165#endif
7166
1b2f922f
MC
7167 pci_set_drvdata(pdev, dev);
7168
7169 memcpy(dev->dev_addr, bp->mac_addr, 6);
7170 memcpy(dev->perm_addr, bp->mac_addr, 6);
7171 bp->name = board_info[ent->driver_data].name;
7172
d212f87b 7173 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
4666f87a 7174 if (CHIP_NUM(bp) == CHIP_NUM_5709)
d212f87b
SH
7175 dev->features |= NETIF_F_IPV6_CSUM;
7176
1b2f922f
MC
7177#ifdef BCM_VLAN
7178 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7179#endif
7180 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
4666f87a
MC
7181 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7182 dev->features |= NETIF_F_TSO6;
1b2f922f 7183
b6016b76 7184 if ((rc = register_netdev(dev))) {
9b91cf9d 7185 dev_err(&pdev->dev, "Cannot register net device\n");
b6016b76
MC
7186 if (bp->regview)
7187 iounmap(bp->regview);
7188 pci_release_regions(pdev);
7189 pci_disable_device(pdev);
7190 pci_set_drvdata(pdev, NULL);
7191 free_netdev(dev);
7192 return rc;
7193 }
7194
883e5151 7195 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
0795af57 7196 "IRQ %d, node addr %s\n",
b6016b76
MC
7197 dev->name,
7198 bp->name,
7199 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
7200 ((CHIP_ID(bp) & 0x0ff0) >> 4),
883e5151 7201 bnx2_bus_string(bp, str),
b6016b76 7202 dev->base_addr,
0795af57 7203 bp->pdev->irq, print_mac(mac, dev->dev_addr));
b6016b76 7204
b6016b76
MC
7205 return 0;
7206}
7207
7208static void __devexit
7209bnx2_remove_one(struct pci_dev *pdev)
7210{
7211 struct net_device *dev = pci_get_drvdata(pdev);
972ec0d4 7212 struct bnx2 *bp = netdev_priv(dev);
b6016b76 7213
afdc08b9
MC
7214 flush_scheduled_work();
7215
b6016b76
MC
7216 unregister_netdev(dev);
7217
7218 if (bp->regview)
7219 iounmap(bp->regview);
7220
7221 free_netdev(dev);
7222 pci_release_regions(pdev);
7223 pci_disable_device(pdev);
7224 pci_set_drvdata(pdev, NULL);
7225}
7226
7227static int
829ca9a3 7228bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
b6016b76
MC
7229{
7230 struct net_device *dev = pci_get_drvdata(pdev);
972ec0d4 7231 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7232 u32 reset_code;
7233
6caebb02
MC
7234 /* PCI register 4 needs to be saved whether netif_running() or not.
7235 * MSI address and data need to be saved if using MSI and
7236 * netif_running().
7237 */
7238 pci_save_state(pdev);
b6016b76
MC
7239 if (!netif_running(dev))
7240 return 0;
7241
1d60290f 7242 flush_scheduled_work();
b6016b76
MC
7243 bnx2_netif_stop(bp);
7244 netif_device_detach(dev);
7245 del_timer_sync(&bp->timer);
dda1e390 7246 if (bp->flags & NO_WOL_FLAG)
6c4f095e 7247 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
dda1e390 7248 else if (bp->wol)
b6016b76
MC
7249 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
7250 else
7251 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
7252 bnx2_reset_chip(bp, reset_code);
7253 bnx2_free_skbs(bp);
829ca9a3 7254 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
b6016b76
MC
7255 return 0;
7256}
7257
7258static int
7259bnx2_resume(struct pci_dev *pdev)
7260{
7261 struct net_device *dev = pci_get_drvdata(pdev);
972ec0d4 7262 struct bnx2 *bp = netdev_priv(dev);
b6016b76 7263
6caebb02 7264 pci_restore_state(pdev);
b6016b76
MC
7265 if (!netif_running(dev))
7266 return 0;
7267
829ca9a3 7268 bnx2_set_power_state(bp, PCI_D0);
b6016b76
MC
7269 netif_device_attach(dev);
7270 bnx2_init_nic(bp);
7271 bnx2_netif_start(bp);
7272 return 0;
7273}
7274
7275static struct pci_driver bnx2_pci_driver = {
14ab9b86
PH
7276 .name = DRV_MODULE_NAME,
7277 .id_table = bnx2_pci_tbl,
7278 .probe = bnx2_init_one,
7279 .remove = __devexit_p(bnx2_remove_one),
7280 .suspend = bnx2_suspend,
7281 .resume = bnx2_resume,
b6016b76
MC
7282};
7283
7284static int __init bnx2_init(void)
7285{
29917620 7286 return pci_register_driver(&bnx2_pci_driver);
b6016b76
MC
7287}
7288
7289static void __exit bnx2_cleanup(void)
7290{
7291 pci_unregister_driver(&bnx2_pci_driver);
7292}
7293
/* Module entry and exit points. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
7296
7297
7298