1 /* bnx2.c: Broadcom NX2 network driver.
2 *
3 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
12 #include <linux/config.h>
13
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16
17 #include <linux/kernel.h>
18 #include <linux/timer.h>
19 #include <linux/errno.h>
20 #include <linux/ioport.h>
21 #include <linux/slab.h>
22 #include <linux/vmalloc.h>
23 #include <linux/interrupt.h>
24 #include <linux/pci.h>
25 #include <linux/init.h>
26 #include <linux/netdevice.h>
27 #include <linux/etherdevice.h>
28 #include <linux/skbuff.h>
29 #include <linux/dma-mapping.h>
30 #include <asm/bitops.h>
31 #include <asm/io.h>
32 #include <asm/irq.h>
33 #include <linux/delay.h>
34 #include <asm/byteorder.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #ifdef NETIF_F_TSO
43 #include <net/ip.h>
44 #include <net/tcp.h>
45 #include <net/checksum.h>
46 #define BCM_TSO 1
47 #endif
48 #include <linux/workqueue.h>
49 #include <linux/crc32.h>
50 #include <linux/prefetch.h>
51 #include <linux/cache.h>
52
53 #include "bnx2.h"
54 #include "bnx2_fw.h"
55
56 #define DRV_MODULE_NAME "bnx2"
57 #define PFX DRV_MODULE_NAME ": "
58 #define DRV_MODULE_VERSION "1.4.39"
59 #define DRV_MODULE_RELDATE "March 22, 2006"
60
61 #define RUN_AT(x) (jiffies + (x))
62
63 /* Time in jiffies before concluding the transmitter is hung. */
64 #define TX_TIMEOUT (5*HZ)
65
66 static char version[] __devinitdata =
67 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
68
69 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
70 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
71 MODULE_LICENSE("GPL");
72 MODULE_VERSION(DRV_MODULE_VERSION);
73
74 static int disable_msi = 0;
75
76 module_param(disable_msi, int, 0);
77 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
78
79 typedef enum {
80 BCM5706 = 0,
81 NC370T,
82 NC370I,
83 BCM5706S,
84 NC370F,
85 BCM5708,
86 BCM5708S,
87 } board_t;
88
89 /* indexed by board_t, above */
90 static const struct {
91 char *name;
92 } board_info[] __devinitdata = {
93 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
94 { "HP NC370T Multifunction Gigabit Server Adapter" },
95 { "HP NC370i Multifunction Gigabit Server Adapter" },
96 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
97 { "HP NC370F Multifunction Gigabit Server Adapter" },
98 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
99 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
100 };
101
102 static struct pci_device_id bnx2_pci_tbl[] = {
103 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
104 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
105 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
106 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
107 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
108 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
109 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
110 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
111 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
112 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
113 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
114 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
115 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
116 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
117 { 0, }
118 };
119
120 static struct flash_spec flash_table[] =
121 {
122 /* Slow EEPROM */
123 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
124 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
125 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
126 "EEPROM - slow"},
127 /* Expansion entry 0001 */
128 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
129 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
130 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
131 "Entry 0001"},
132 /* Saifun SA25F010 (non-buffered flash) */
133 /* strap, cfg1, & write1 need updates */
134 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
135 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
136 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
137 "Non-buffered flash (128kB)"},
138 /* Saifun SA25F020 (non-buffered flash) */
139 /* strap, cfg1, & write1 need updates */
140 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
141 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
142 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
143 "Non-buffered flash (256kB)"},
144 /* Expansion entry 0100 */
145 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
146 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
147 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
148 "Entry 0100"},
149 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
150 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
151 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
152 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
153 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
154 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
155 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
156 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
157 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
158 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
159 /* Saifun SA25F005 (non-buffered flash) */
160 /* strap, cfg1, & write1 need updates */
161 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
162 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
163 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
164 "Non-buffered flash (64kB)"},
165 /* Fast EEPROM */
166 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
167 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
168 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
169 "EEPROM - fast"},
170 /* Expansion entry 1001 */
171 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
172 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
173 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
174 "Entry 1001"},
175 /* Expansion entry 1010 */
176 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
177 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
178 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
179 "Entry 1010"},
180 /* ATMEL AT45DB011B (buffered flash) */
181 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
182 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
183 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
184 "Buffered flash (128kB)"},
185 /* Expansion entry 1100 */
186 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
187 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
188 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
189 "Entry 1100"},
190 /* Expansion entry 1101 */
191 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
192 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
193 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
194 "Entry 1101"},
196 /* Atmel Expansion entry 1110 */
196 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
197 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
198 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
199 "Entry 1110 (Atmel)"},
200 /* ATMEL AT45DB021B (buffered flash) */
201 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
202 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
203 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
204 "Buffered flash (256kB)"},
205 };
206
207 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
208
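/* Number of TX descriptors still free on the ring, derived from the
 * producer/consumer indices with wrap-around handled explicitly.
 */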
209 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
210 {
211 u32 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
212
213 if (diff > MAX_TX_DESC_CNT)
214 diff = (diff & MAX_TX_DESC_CNT) - 1;
215 return (bp->tx_ring_size - diff);
216 }
217
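/* Indirect register access: select the target offset through the
 * PCICFG window, then read or write the windowed data register.
 */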
218 static u32
219 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
220 {
221 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
222 return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
223 }
224
225 static void
226 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
227 {
228 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
229 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
230 }
231
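/* Write one 32-bit word into the on-chip context memory for a CID. */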
232 static void
233 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
234 {
235 offset += cid_addr;
236 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
237 REG_WR(bp, BNX2_CTX_DATA, val);
238 }
239
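/* Read a PHY register over the MDIO interface. Auto-polling is
 * paused around the access and restored afterwards.
 */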
240 static int
241 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
242 {
243 u32 val1;
244 int i, ret;
245
246 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
247 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
248 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
249
250 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
251 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
252
253 udelay(40);
254 }
255
256 val1 = (bp->phy_addr << 21) | (reg << 16) |
257 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
258 BNX2_EMAC_MDIO_COMM_START_BUSY;
259 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
260
261 for (i = 0; i < 50; i++) {
262 udelay(10);
263
264 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
265 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
266 udelay(5);
267
268 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
269 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
270
271 break;
272 }
273 }
274
275 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
276 *val = 0x0;
277 ret = -EBUSY;
278 }
279 else {
280 *val = val1;
281 ret = 0;
282 }
283
284 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
285 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
286 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
287
288 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
289 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
290
291 udelay(40);
292 }
293
294 return ret;
295 }
296
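/* Write a PHY register over the MDIO interface, pausing auto-polling
 * around the access just like bnx2_read_phy().
 */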
297 static int
298 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
299 {
300 u32 val1;
301 int i, ret;
302
303 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
304 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
305 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
306
307 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
308 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
309
310 udelay(40);
311 }
312
313 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
314 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
315 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
316 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
317
318 for (i = 0; i < 50; i++) {
319 udelay(10);
320
321 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
322 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
323 udelay(5);
324 break;
325 }
326 }
327
328 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
329 ret = -EBUSY;
330 else
331 ret = 0;
332
333 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
334 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
335 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
336
337 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
338 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
339
340 udelay(40);
341 }
342
343 return ret;
344 }
345
346 static void
347 bnx2_disable_int(struct bnx2 *bp)
348 {
349 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
350 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
351 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
352 }
353
354 static void
355 bnx2_enable_int(struct bnx2 *bp)
356 {
357 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
358 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
359 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
360
361 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
362 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
363
364 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
365 }
366
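/* Mask the interrupt and wait for any handler already running on
 * another CPU to complete.
 */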
367 static void
368 bnx2_disable_int_sync(struct bnx2 *bp)
369 {
370 atomic_inc(&bp->intr_sem);
371 bnx2_disable_int(bp);
372 synchronize_irq(bp->pdev->irq);
373 }
374
375 static void
376 bnx2_netif_stop(struct bnx2 *bp)
377 {
378 bnx2_disable_int_sync(bp);
379 if (netif_running(bp->dev)) {
380 netif_poll_disable(bp->dev);
381 netif_tx_disable(bp->dev);
382 bp->dev->trans_start = jiffies; /* prevent tx timeout */
383 }
384 }
385
386 static void
387 bnx2_netif_start(struct bnx2 *bp)
388 {
389 if (atomic_dec_and_test(&bp->intr_sem)) {
390 if (netif_running(bp->dev)) {
391 netif_wake_queue(bp->dev);
392 netif_poll_enable(bp->dev);
393 bnx2_enable_int(bp);
394 }
395 }
396 }
397
398 static void
399 bnx2_free_mem(struct bnx2 *bp)
400 {
401 int i;
402
403 if (bp->status_blk) {
404 pci_free_consistent(bp->pdev, bp->status_stats_size,
405 bp->status_blk, bp->status_blk_mapping);
406 bp->status_blk = NULL;
407 bp->stats_blk = NULL;
408 }
409 if (bp->tx_desc_ring) {
410 pci_free_consistent(bp->pdev,
411 sizeof(struct tx_bd) * TX_DESC_CNT,
412 bp->tx_desc_ring, bp->tx_desc_mapping);
413 bp->tx_desc_ring = NULL;
414 }
415 kfree(bp->tx_buf_ring);
416 bp->tx_buf_ring = NULL;
417 for (i = 0; i < bp->rx_max_ring; i++) {
418 if (bp->rx_desc_ring[i])
419 pci_free_consistent(bp->pdev,
420 sizeof(struct rx_bd) * RX_DESC_CNT,
421 bp->rx_desc_ring[i],
422 bp->rx_desc_mapping[i]);
423 bp->rx_desc_ring[i] = NULL;
424 }
425 vfree(bp->rx_buf_ring);
426 bp->rx_buf_ring = NULL;
427 }
428
429 static int
430 bnx2_alloc_mem(struct bnx2 *bp)
431 {
432 int i, status_blk_size;
433
434 bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
435 GFP_KERNEL);
436 if (bp->tx_buf_ring == NULL)
437 return -ENOMEM;
438
439 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
440 sizeof(struct tx_bd) *
441 TX_DESC_CNT,
442 &bp->tx_desc_mapping);
443 if (bp->tx_desc_ring == NULL)
444 goto alloc_mem_err;
445
446 bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
447 bp->rx_max_ring);
448 if (bp->rx_buf_ring == NULL)
449 goto alloc_mem_err;
450
451 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
452 bp->rx_max_ring);
453
454 for (i = 0; i < bp->rx_max_ring; i++) {
455 bp->rx_desc_ring[i] =
456 pci_alloc_consistent(bp->pdev,
457 sizeof(struct rx_bd) * RX_DESC_CNT,
458 &bp->rx_desc_mapping[i]);
459 if (bp->rx_desc_ring[i] == NULL)
460 goto alloc_mem_err;
461
462 }
463
464 /* Combine status and statistics blocks into one allocation. */
465 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
466 bp->status_stats_size = status_blk_size +
467 sizeof(struct statistics_block);
468
469 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
470 &bp->status_blk_mapping);
471 if (bp->status_blk == NULL)
472 goto alloc_mem_err;
473
474 memset(bp->status_blk, 0, bp->status_stats_size);
475
476 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
477 status_blk_size);
478
479 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
480
481 return 0;
482
483 alloc_mem_err:
484 bnx2_free_mem(bp);
485 return -ENOMEM;
486 }
487
488 static void
489 bnx2_report_fw_link(struct bnx2 *bp)
490 {
491 u32 fw_link_status = 0;
492
493 if (bp->link_up) {
494 u32 bmsr;
495
496 switch (bp->line_speed) {
497 case SPEED_10:
498 if (bp->duplex == DUPLEX_HALF)
499 fw_link_status = BNX2_LINK_STATUS_10HALF;
500 else
501 fw_link_status = BNX2_LINK_STATUS_10FULL;
502 break;
503 case SPEED_100:
504 if (bp->duplex == DUPLEX_HALF)
505 fw_link_status = BNX2_LINK_STATUS_100HALF;
506 else
507 fw_link_status = BNX2_LINK_STATUS_100FULL;
508 break;
509 case SPEED_1000:
510 if (bp->duplex == DUPLEX_HALF)
511 fw_link_status = BNX2_LINK_STATUS_1000HALF;
512 else
513 fw_link_status = BNX2_LINK_STATUS_1000FULL;
514 break;
515 case SPEED_2500:
516 if (bp->duplex == DUPLEX_HALF)
517 fw_link_status = BNX2_LINK_STATUS_2500HALF;
518 else
519 fw_link_status = BNX2_LINK_STATUS_2500FULL;
520 break;
521 }
522
523 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
524
525 if (bp->autoneg) {
526 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
527
528 bnx2_read_phy(bp, MII_BMSR, &bmsr);
529 bnx2_read_phy(bp, MII_BMSR, &bmsr);
530
531 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
532 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
533 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
534 else
535 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
536 }
537 }
538 else
539 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
540
541 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
542 }
543
544 static void
545 bnx2_report_link(struct bnx2 *bp)
546 {
547 if (bp->link_up) {
548 netif_carrier_on(bp->dev);
549 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
550
551 printk("%d Mbps ", bp->line_speed);
552
553 if (bp->duplex == DUPLEX_FULL)
554 printk("full duplex");
555 else
556 printk("half duplex");
557
558 if (bp->flow_ctrl) {
559 if (bp->flow_ctrl & FLOW_CTRL_RX) {
560 printk(", receive ");
561 if (bp->flow_ctrl & FLOW_CTRL_TX)
562 printk("& transmit ");
563 }
564 else {
565 printk(", transmit ");
566 }
567 printk("flow control ON");
568 }
569 printk("\n");
570 }
571 else {
572 netif_carrier_off(bp->dev);
573 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
574 }
575
576 bnx2_report_fw_link(bp);
577 }
578
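/* Resolve the TX/RX pause settings, either from the forced
 * configuration or from the local and remote pause advertisement.
 */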
579 static void
580 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
581 {
582 u32 local_adv, remote_adv;
583
584 bp->flow_ctrl = 0;
585 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
586 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
587
588 if (bp->duplex == DUPLEX_FULL) {
589 bp->flow_ctrl = bp->req_flow_ctrl;
590 }
591 return;
592 }
593
594 if (bp->duplex != DUPLEX_FULL) {
595 return;
596 }
597
598 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
599 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
600 u32 val;
601
602 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
603 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
604 bp->flow_ctrl |= FLOW_CTRL_TX;
605 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
606 bp->flow_ctrl |= FLOW_CTRL_RX;
607 return;
608 }
609
610 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
611 bnx2_read_phy(bp, MII_LPA, &remote_adv);
612
613 if (bp->phy_flags & PHY_SERDES_FLAG) {
614 u32 new_local_adv = 0;
615 u32 new_remote_adv = 0;
616
617 if (local_adv & ADVERTISE_1000XPAUSE)
618 new_local_adv |= ADVERTISE_PAUSE_CAP;
619 if (local_adv & ADVERTISE_1000XPSE_ASYM)
620 new_local_adv |= ADVERTISE_PAUSE_ASYM;
621 if (remote_adv & ADVERTISE_1000XPAUSE)
622 new_remote_adv |= ADVERTISE_PAUSE_CAP;
623 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
624 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
625
626 local_adv = new_local_adv;
627 remote_adv = new_remote_adv;
628 }
629
630 /* See Table 28B-3 of 802.3ab-1999 spec. */
631 if (local_adv & ADVERTISE_PAUSE_CAP) {
632 if(local_adv & ADVERTISE_PAUSE_ASYM) {
633 if (remote_adv & ADVERTISE_PAUSE_CAP) {
634 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
635 }
636 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
637 bp->flow_ctrl = FLOW_CTRL_RX;
638 }
639 }
640 else {
641 if (remote_adv & ADVERTISE_PAUSE_CAP) {
642 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
643 }
644 }
645 }
646 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
647 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
648 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
649
650 bp->flow_ctrl = FLOW_CTRL_TX;
651 }
652 }
653 }
654
655 static int
656 bnx2_5708s_linkup(struct bnx2 *bp)
657 {
658 u32 val;
659
660 bp->link_up = 1;
661 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
662 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
663 case BCM5708S_1000X_STAT1_SPEED_10:
664 bp->line_speed = SPEED_10;
665 break;
666 case BCM5708S_1000X_STAT1_SPEED_100:
667 bp->line_speed = SPEED_100;
668 break;
669 case BCM5708S_1000X_STAT1_SPEED_1G:
670 bp->line_speed = SPEED_1000;
671 break;
672 case BCM5708S_1000X_STAT1_SPEED_2G5:
673 bp->line_speed = SPEED_2500;
674 break;
675 }
676 if (val & BCM5708S_1000X_STAT1_FD)
677 bp->duplex = DUPLEX_FULL;
678 else
679 bp->duplex = DUPLEX_HALF;
680
681 return 0;
682 }
683
684 static int
685 bnx2_5706s_linkup(struct bnx2 *bp)
686 {
687 u32 bmcr, local_adv, remote_adv, common;
688
689 bp->link_up = 1;
690 bp->line_speed = SPEED_1000;
691
692 bnx2_read_phy(bp, MII_BMCR, &bmcr);
693 if (bmcr & BMCR_FULLDPLX) {
694 bp->duplex = DUPLEX_FULL;
695 }
696 else {
697 bp->duplex = DUPLEX_HALF;
698 }
699
700 if (!(bmcr & BMCR_ANENABLE)) {
701 return 0;
702 }
703
704 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
705 bnx2_read_phy(bp, MII_LPA, &remote_adv);
706
707 common = local_adv & remote_adv;
708 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
709
710 if (common & ADVERTISE_1000XFULL) {
711 bp->duplex = DUPLEX_FULL;
712 }
713 else {
714 bp->duplex = DUPLEX_HALF;
715 }
716 }
717
718 return 0;
719 }
720
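/* Derive line speed and duplex on a copper PHY, from the common
 * advertisement when autonegotiating or from BMCR when forced.
 */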
721 static int
722 bnx2_copper_linkup(struct bnx2 *bp)
723 {
724 u32 bmcr;
725
726 bnx2_read_phy(bp, MII_BMCR, &bmcr);
727 if (bmcr & BMCR_ANENABLE) {
728 u32 local_adv, remote_adv, common;
729
730 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
731 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
732
733 common = local_adv & (remote_adv >> 2);
734 if (common & ADVERTISE_1000FULL) {
735 bp->line_speed = SPEED_1000;
736 bp->duplex = DUPLEX_FULL;
737 }
738 else if (common & ADVERTISE_1000HALF) {
739 bp->line_speed = SPEED_1000;
740 bp->duplex = DUPLEX_HALF;
741 }
742 else {
743 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
744 bnx2_read_phy(bp, MII_LPA, &remote_adv);
745
746 common = local_adv & remote_adv;
747 if (common & ADVERTISE_100FULL) {
748 bp->line_speed = SPEED_100;
749 bp->duplex = DUPLEX_FULL;
750 }
751 else if (common & ADVERTISE_100HALF) {
752 bp->line_speed = SPEED_100;
753 bp->duplex = DUPLEX_HALF;
754 }
755 else if (common & ADVERTISE_10FULL) {
756 bp->line_speed = SPEED_10;
757 bp->duplex = DUPLEX_FULL;
758 }
759 else if (common & ADVERTISE_10HALF) {
760 bp->line_speed = SPEED_10;
761 bp->duplex = DUPLEX_HALF;
762 }
763 else {
764 bp->line_speed = 0;
765 bp->link_up = 0;
766 }
767 }
768 }
769 else {
770 if (bmcr & BMCR_SPEED100) {
771 bp->line_speed = SPEED_100;
772 }
773 else {
774 bp->line_speed = SPEED_10;
775 }
776 if (bmcr & BMCR_FULLDPLX) {
777 bp->duplex = DUPLEX_FULL;
778 }
779 else {
780 bp->duplex = DUPLEX_HALF;
781 }
782 }
783
784 return 0;
785 }
786
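/* Program the EMAC mode, duplex and pause settings to match the
 * currently resolved link parameters.
 */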
787 static int
788 bnx2_set_mac_link(struct bnx2 *bp)
789 {
790 u32 val;
791
792 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
793 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
794 (bp->duplex == DUPLEX_HALF)) {
795 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
796 }
797
798 /* Configure the EMAC mode register. */
799 val = REG_RD(bp, BNX2_EMAC_MODE);
800
801 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
802 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
803 BNX2_EMAC_MODE_25G);
804
805 if (bp->link_up) {
806 switch (bp->line_speed) {
807 case SPEED_10:
808 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
809 val |= BNX2_EMAC_MODE_PORT_MII_10;
810 break;
811 }
812 /* fall through */
813 case SPEED_100:
814 val |= BNX2_EMAC_MODE_PORT_MII;
815 break;
816 case SPEED_2500:
817 val |= BNX2_EMAC_MODE_25G;
818 /* fall through */
819 case SPEED_1000:
820 val |= BNX2_EMAC_MODE_PORT_GMII;
821 break;
822 }
823 }
824 else {
825 val |= BNX2_EMAC_MODE_PORT_GMII;
826 }
827
828 /* Set the MAC to operate in the appropriate duplex mode. */
829 if (bp->duplex == DUPLEX_HALF)
830 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
831 REG_WR(bp, BNX2_EMAC_MODE, val);
832
833 /* Enable/disable rx PAUSE. */
834 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
835
836 if (bp->flow_ctrl & FLOW_CTRL_RX)
837 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
838 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
839
840 /* Enable/disable tx PAUSE. */
841 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
842 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
843
844 if (bp->flow_ctrl & FLOW_CTRL_TX)
845 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
846 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
847
848 /* Acknowledge the interrupt. */
849 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
850
851 return 0;
852 }
853
854 static int
855 bnx2_set_link(struct bnx2 *bp)
856 {
857 u32 bmsr;
858 u8 link_up;
859
860 if (bp->loopback == MAC_LOOPBACK) {
861 bp->link_up = 1;
862 return 0;
863 }
864
865 link_up = bp->link_up;
866
867 bnx2_read_phy(bp, MII_BMSR, &bmsr);
868 bnx2_read_phy(bp, MII_BMSR, &bmsr);
869
870 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
871 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
872 u32 val;
873
874 val = REG_RD(bp, BNX2_EMAC_STATUS);
875 if (val & BNX2_EMAC_STATUS_LINK)
876 bmsr |= BMSR_LSTATUS;
877 else
878 bmsr &= ~BMSR_LSTATUS;
879 }
880
881 if (bmsr & BMSR_LSTATUS) {
882 bp->link_up = 1;
883
884 if (bp->phy_flags & PHY_SERDES_FLAG) {
885 if (CHIP_NUM(bp) == CHIP_NUM_5706)
886 bnx2_5706s_linkup(bp);
887 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
888 bnx2_5708s_linkup(bp);
889 }
890 else {
891 bnx2_copper_linkup(bp);
892 }
893 bnx2_resolve_flow_ctrl(bp);
894 }
895 else {
896 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
897 (bp->autoneg & AUTONEG_SPEED)) {
898
899 u32 bmcr;
900
901 bnx2_read_phy(bp, MII_BMCR, &bmcr);
902 if (!(bmcr & BMCR_ANENABLE)) {
903 bnx2_write_phy(bp, MII_BMCR, bmcr |
904 BMCR_ANENABLE);
905 }
906 }
907 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
908 bp->link_up = 0;
909 }
910
911 if (bp->link_up != link_up) {
912 bnx2_report_link(bp);
913 }
914
915 bnx2_set_mac_link(bp);
916
917 return 0;
918 }
919
920 static int
921 bnx2_reset_phy(struct bnx2 *bp)
922 {
923 int i;
924 u32 reg;
925
926 bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
927
928 #define PHY_RESET_MAX_WAIT 100
929 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
930 udelay(10);
931
932 bnx2_read_phy(bp, MII_BMCR, &reg);
933 if (!(reg & BMCR_RESET)) {
934 udelay(20);
935 break;
936 }
937 }
938 if (i == PHY_RESET_MAX_WAIT) {
939 return -EBUSY;
940 }
941 return 0;
942 }
943
944 static u32
945 bnx2_phy_get_pause_adv(struct bnx2 *bp)
946 {
947 u32 adv = 0;
948
949 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
950 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
951
952 if (bp->phy_flags & PHY_SERDES_FLAG) {
953 adv = ADVERTISE_1000XPAUSE;
954 }
955 else {
956 adv = ADVERTISE_PAUSE_CAP;
957 }
958 }
959 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
960 if (bp->phy_flags & PHY_SERDES_FLAG) {
961 adv = ADVERTISE_1000XPSE_ASYM;
962 }
963 else {
964 adv = ADVERTISE_PAUSE_ASYM;
965 }
966 }
967 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
968 if (bp->phy_flags & PHY_SERDES_FLAG) {
969 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
970 }
971 else {
972 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
973 }
974 }
975 return adv;
976 }
977
978 static int
979 bnx2_setup_serdes_phy(struct bnx2 *bp)
980 {
981 u32 adv, bmcr, up1;
982 u32 new_adv = 0;
983
984 if (!(bp->autoneg & AUTONEG_SPEED)) {
985 u32 new_bmcr;
986 int force_link_down = 0;
987
988 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
989 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
990 if (up1 & BCM5708S_UP1_2G5) {
991 up1 &= ~BCM5708S_UP1_2G5;
992 bnx2_write_phy(bp, BCM5708S_UP1, up1);
993 force_link_down = 1;
994 }
995 }
996
997 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
998 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
999
1000 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1001 new_bmcr = bmcr & ~BMCR_ANENABLE;
1002 new_bmcr |= BMCR_SPEED1000;
1003 if (bp->req_duplex == DUPLEX_FULL) {
1004 adv |= ADVERTISE_1000XFULL;
1005 new_bmcr |= BMCR_FULLDPLX;
1006 }
1007 else {
1008 adv |= ADVERTISE_1000XHALF;
1009 new_bmcr &= ~BMCR_FULLDPLX;
1010 }
1011 if ((new_bmcr != bmcr) || (force_link_down)) {
1012 /* Force a link down visible on the other side */
1013 if (bp->link_up) {
1014 bnx2_write_phy(bp, MII_ADVERTISE, adv &
1015 ~(ADVERTISE_1000XFULL |
1016 ADVERTISE_1000XHALF));
1017 bnx2_write_phy(bp, MII_BMCR, bmcr |
1018 BMCR_ANRESTART | BMCR_ANENABLE);
1019
1020 bp->link_up = 0;
1021 netif_carrier_off(bp->dev);
1022 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1023 }
1024 bnx2_write_phy(bp, MII_ADVERTISE, adv);
1025 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1026 }
1027 return 0;
1028 }
1029
1030 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1031 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1032 up1 |= BCM5708S_UP1_2G5;
1033 bnx2_write_phy(bp, BCM5708S_UP1, up1);
1034 }
1035
1036 if (bp->advertising & ADVERTISED_1000baseT_Full)
1037 new_adv |= ADVERTISE_1000XFULL;
1038
1039 new_adv |= bnx2_phy_get_pause_adv(bp);
1040
1041 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
1042 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1043
1044 bp->serdes_an_pending = 0;
1045 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1046 /* Force a link down visible on the other side */
1047 if (bp->link_up) {
1048 int i;
1049
1050 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
1051 for (i = 0; i < 110; i++) {
1052 udelay(100);
1053 }
1054 }
1055
1056 bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
1057 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
1058 BMCR_ANENABLE);
1059 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
1060 /* Speed up link-up time when the link partner
1061 * does not autonegotiate, which is very common
1062 * in blade servers. Some blade servers use
1063 * IPMI for keyboard input and it's important
1064 * to minimize link disruptions. Autoneg. involves
1065 * exchanging base pages plus 3 next pages and
1066 * normally completes in about 120 msec.
1067 */
1068 bp->current_interval = SERDES_AN_TIMEOUT;
1069 bp->serdes_an_pending = 1;
1070 mod_timer(&bp->timer, jiffies + bp->current_interval);
1071 }
1072 }
1073
1074 return 0;
1075 }
1076
1077 #define ETHTOOL_ALL_FIBRE_SPEED \
1078 (ADVERTISED_1000baseT_Full)
1079
1080 #define ETHTOOL_ALL_COPPER_SPEED \
1081 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1082 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1083 ADVERTISED_1000baseT_Full)
1084
1085 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1086 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1087
1088 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1089
1090 static int
1091 bnx2_setup_copper_phy(struct bnx2 *bp)
1092 {
1093 u32 bmcr;
1094 u32 new_bmcr;
1095
1096 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1097
1098 if (bp->autoneg & AUTONEG_SPEED) {
1099 u32 adv_reg, adv1000_reg;
1100 u32 new_adv_reg = 0;
1101 u32 new_adv1000_reg = 0;
1102
1103 bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
1104 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1105 ADVERTISE_PAUSE_ASYM);
1106
1107 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1108 adv1000_reg &= PHY_ALL_1000_SPEED;
1109
1110 if (bp->advertising & ADVERTISED_10baseT_Half)
1111 new_adv_reg |= ADVERTISE_10HALF;
1112 if (bp->advertising & ADVERTISED_10baseT_Full)
1113 new_adv_reg |= ADVERTISE_10FULL;
1114 if (bp->advertising & ADVERTISED_100baseT_Half)
1115 new_adv_reg |= ADVERTISE_100HALF;
1116 if (bp->advertising & ADVERTISED_100baseT_Full)
1117 new_adv_reg |= ADVERTISE_100FULL;
1118 if (bp->advertising & ADVERTISED_1000baseT_Full)
1119 new_adv1000_reg |= ADVERTISE_1000FULL;
1120
1121 new_adv_reg |= ADVERTISE_CSMA;
1122
1123 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1124
1125 if ((adv1000_reg != new_adv1000_reg) ||
1126 (adv_reg != new_adv_reg) ||
1127 ((bmcr & BMCR_ANENABLE) == 0)) {
1128
1129 bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
1130 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1131 bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
1132 BMCR_ANENABLE);
1133 }
1134 else if (bp->link_up) {
1135 /* Flow ctrl may have changed from auto to forced */
1136 /* or vice-versa. */
1137
1138 bnx2_resolve_flow_ctrl(bp);
1139 bnx2_set_mac_link(bp);
1140 }
1141 return 0;
1142 }
1143
1144 new_bmcr = 0;
1145 if (bp->req_line_speed == SPEED_100) {
1146 new_bmcr |= BMCR_SPEED100;
1147 }
1148 if (bp->req_duplex == DUPLEX_FULL) {
1149 new_bmcr |= BMCR_FULLDPLX;
1150 }
1151 if (new_bmcr != bmcr) {
1152 u32 bmsr;
1153 int i = 0;
1154
1155 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1156 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1157
1158 if (bmsr & BMSR_LSTATUS) {
1159 /* Force link down */
1160 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
1161 do {
1162 udelay(100);
1163 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1164 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1165 i++;
1166 } while ((bmsr & BMSR_LSTATUS) && (i < 620));
1167 }
1168
1169 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1170
1171 /* Normally, the new speed is set up after the link has
1172 * gone down and up again. In some cases, link will not go
1173 * down so we need to set up the new speed here.
1174 */
1175 if (bmsr & BMSR_LSTATUS) {
1176 bp->line_speed = bp->req_line_speed;
1177 bp->duplex = bp->req_duplex;
1178 bnx2_resolve_flow_ctrl(bp);
1179 bnx2_set_mac_link(bp);
1180 }
1181 }
1182 return 0;
1183 }
1184
1185 static int
1186 bnx2_setup_phy(struct bnx2 *bp)
1187 {
1188 if (bp->loopback == MAC_LOOPBACK)
1189 return 0;
1190
1191 if (bp->phy_flags & PHY_SERDES_FLAG) {
1192 return (bnx2_setup_serdes_phy(bp));
1193 }
1194 else {
1195 return (bnx2_setup_copper_phy(bp));
1196 }
1197 }
1198
1199 static int
1200 bnx2_init_5708s_phy(struct bnx2 *bp)
1201 {
1202 u32 val;
1203
1204 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1205 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1206 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1207
1208 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1209 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1210 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1211
1212 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1213 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1214 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1215
1216 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1217 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1218 val |= BCM5708S_UP1_2G5;
1219 bnx2_write_phy(bp, BCM5708S_UP1, val);
1220 }
1221
1222 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1223 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1224 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1225 /* increase tx signal amplitude */
1226 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1227 BCM5708S_BLK_ADDR_TX_MISC);
1228 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1229 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1230 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1231 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1232 }
1233
1234 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
1235 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1236
1237 if (val) {
1238 u32 is_backplane;
1239
1240 is_backplane = REG_RD_IND(bp, bp->shmem_base +
1241 BNX2_SHARED_HW_CFG_CONFIG);
1242 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1243 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1244 BCM5708S_BLK_ADDR_TX_MISC);
1245 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1246 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1247 BCM5708S_BLK_ADDR_DIG);
1248 }
1249 }
1250 return 0;
1251 }
1252
1253 static int
1254 bnx2_init_5706s_phy(struct bnx2 *bp)
1255 {
1256 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1257
1258 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
1259 REG_WR(bp, BNX2_MISC_UNUSED0, 0x300);
1260 }
1261
1262 if (bp->dev->mtu > 1500) {
1263 u32 val;
1264
1265 /* Set extended packet length bit */
1266 bnx2_write_phy(bp, 0x18, 0x7);
1267 bnx2_read_phy(bp, 0x18, &val);
1268 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1269
1270 bnx2_write_phy(bp, 0x1c, 0x6c00);
1271 bnx2_read_phy(bp, 0x1c, &val);
1272 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1273 }
1274 else {
1275 u32 val;
1276
1277 bnx2_write_phy(bp, 0x18, 0x7);
1278 bnx2_read_phy(bp, 0x18, &val);
1279 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1280
1281 bnx2_write_phy(bp, 0x1c, 0x6c00);
1282 bnx2_read_phy(bp, 0x1c, &val);
1283 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1284 }
1285
1286 return 0;
1287 }
1288
1289 static int
1290 bnx2_init_copper_phy(struct bnx2 *bp)
1291 {
1292 u32 val;
1293
1294 bp->phy_flags |= PHY_CRC_FIX_FLAG;
1295
1296 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1297 bnx2_write_phy(bp, 0x18, 0x0c00);
1298 bnx2_write_phy(bp, 0x17, 0x000a);
1299 bnx2_write_phy(bp, 0x15, 0x310b);
1300 bnx2_write_phy(bp, 0x17, 0x201f);
1301 bnx2_write_phy(bp, 0x15, 0x9506);
1302 bnx2_write_phy(bp, 0x17, 0x401f);
1303 bnx2_write_phy(bp, 0x15, 0x14e2);
1304 bnx2_write_phy(bp, 0x18, 0x0400);
1305 }
1306
1307 if (bp->dev->mtu > 1500) {
1308 /* Set extended packet length bit */
1309 bnx2_write_phy(bp, 0x18, 0x7);
1310 bnx2_read_phy(bp, 0x18, &val);
1311 bnx2_write_phy(bp, 0x18, val | 0x4000);
1312
1313 bnx2_read_phy(bp, 0x10, &val);
1314 bnx2_write_phy(bp, 0x10, val | 0x1);
1315 }
1316 else {
1317 bnx2_write_phy(bp, 0x18, 0x7);
1318 bnx2_read_phy(bp, 0x18, &val);
1319 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1320
1321 bnx2_read_phy(bp, 0x10, &val);
1322 bnx2_write_phy(bp, 0x10, val & ~0x1);
1323 }
1324
1325 /* ethernet@wirespeed */
1326 bnx2_write_phy(bp, 0x18, 0x7007);
1327 bnx2_read_phy(bp, 0x18, &val);
1328 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1329 return 0;
1330 }
1331
1332
1333 static int
1334 bnx2_init_phy(struct bnx2 *bp)
1335 {
1336 u32 val;
1337 int rc = 0;
1338
1339 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1340 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1341
1342 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1343
1344 bnx2_reset_phy(bp);
1345
1346 bnx2_read_phy(bp, MII_PHYSID1, &val);
1347 bp->phy_id = val << 16;
1348 bnx2_read_phy(bp, MII_PHYSID2, &val);
1349 bp->phy_id |= val & 0xffff;
1350
1351 if (bp->phy_flags & PHY_SERDES_FLAG) {
1352 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1353 rc = bnx2_init_5706s_phy(bp);
1354 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1355 rc = bnx2_init_5708s_phy(bp);
1356 }
1357 else {
1358 rc = bnx2_init_copper_phy(bp);
1359 }
1360
1361 bnx2_setup_phy(bp);
1362
1363 return rc;
1364 }
1365
1366 static int
1367 bnx2_set_mac_loopback(struct bnx2 *bp)
1368 {
1369 u32 mac_mode;
1370
1371 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1372 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1373 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1374 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1375 bp->link_up = 1;
1376 return 0;
1377 }
1378
1379 static int bnx2_test_link(struct bnx2 *);
1380
1381 static int
1382 bnx2_set_phy_loopback(struct bnx2 *bp)
1383 {
1384 u32 mac_mode;
1385 int rc, i;
1386
1387 spin_lock_bh(&bp->phy_lock);
1388 rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
1389 BMCR_SPEED1000);
1390 spin_unlock_bh(&bp->phy_lock);
1391 if (rc)
1392 return rc;
1393
1394 for (i = 0; i < 10; i++) {
1395 if (bnx2_test_link(bp) == 0)
1396 break;
1397 udelay(10);
1398 }
1399
1400 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1401 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1402 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1403 BNX2_EMAC_MODE_25G);
1404
1405 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1406 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1407 bp->link_up = 1;
1408 return 0;
1409 }
1410
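/* Post a sequenced message to the bootcode mailbox and poll for the
 * firmware acknowledgement, reporting a timeout back to the firmware.
 */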
1411 static int
1412 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
1413 {
1414 int i;
1415 u32 val;
1416
1417 bp->fw_wr_seq++;
1418 msg_data |= bp->fw_wr_seq;
1419
1420 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1421
1422 /* wait for an acknowledgement. */
1423 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
1424 msleep(10);
1425
1426 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
1427
1428 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
1429 break;
1430 }
1431 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
1432 return 0;
1433
1434 /* If we timed out, inform the firmware that this is the case. */
1435 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
1436 if (!silent)
1437 printk(KERN_ERR PFX "fw sync timeout, reset code = "
1438 "%x\n", msg_data);
1439
1440 msg_data &= ~BNX2_DRV_MSG_CODE;
1441 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
1442
1443 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1444
1445 return -EBUSY;
1446 }
1447
1448 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
1449 return -EIO;
1450
1451 return 0;
1452 }
1453
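/* Zero out all 96 connection contexts in on-chip context memory,
 * remapping context IDs on 5706 A0 silicon.
 */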
1454 static void
1455 bnx2_init_context(struct bnx2 *bp)
1456 {
1457 u32 vcid;
1458
1459 vcid = 96;
1460 while (vcid) {
1461 u32 vcid_addr, pcid_addr, offset;
1462
1463 vcid--;
1464
1465 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1466 u32 new_vcid;
1467
1468 vcid_addr = GET_PCID_ADDR(vcid);
1469 if (vcid & 0x8) {
1470 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1471 }
1472 else {
1473 new_vcid = vcid;
1474 }
1475 pcid_addr = GET_PCID_ADDR(new_vcid);
1476 }
1477 else {
1478 vcid_addr = GET_CID_ADDR(vcid);
1479 pcid_addr = vcid_addr;
1480 }
1481
1482 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1483 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1484
1485 /* Zero out the context. */
1486 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1487 CTX_WR(bp, 0x00, offset, 0);
1488 }
1489
1490 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1491 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1492 }
1493 }
1494
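/* Work around bad RX buffer memory blocks: claim every free mbuf
 * from the firmware and hand back only those with bit 9 clear.
 */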
1495 static int
1496 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1497 {
1498 u16 *good_mbuf;
1499 u32 good_mbuf_cnt;
1500 u32 val;
1501
1502 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1503 if (good_mbuf == NULL) {
1504 printk(KERN_ERR PFX "Failed to allocate memory in "
1505 "bnx2_alloc_bad_rbuf\n");
1506 return -ENOMEM;
1507 }
1508
1509 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1510 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1511
1512 good_mbuf_cnt = 0;
1513
1514 /* Allocate a bunch of mbufs and save the good ones in an array. */
1515 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1516 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1517 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1518
1519 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1520
1521 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1522
1523 /* The addresses with Bit 9 set are bad memory blocks. */
1524 if (!(val & (1 << 9))) {
1525 good_mbuf[good_mbuf_cnt] = (u16) val;
1526 good_mbuf_cnt++;
1527 }
1528
1529 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1530 }
1531
1532 /* Free the good ones back to the mbuf pool, thus discarding
1533 * all the bad ones. */
1534 while (good_mbuf_cnt) {
1535 good_mbuf_cnt--;
1536
1537 val = good_mbuf[good_mbuf_cnt];
1538 val = (val << 9) | val | 1;
1539
1540 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
1541 }
1542 kfree(good_mbuf);
1543 return 0;
1544 }
1545
1546 static void
1547 bnx2_set_mac_addr(struct bnx2 *bp)
1548 {
1549 u32 val;
1550 u8 *mac_addr = bp->dev->dev_addr;
1551
1552 val = (mac_addr[0] << 8) | mac_addr[1];
1553
1554 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1555
1556 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1557 (mac_addr[4] << 8) | mac_addr[5];
1558
1559 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1560 }
1561
1562 static inline int
1563 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1564 {
1565 struct sk_buff *skb;
1566 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1567 dma_addr_t mapping;
1568 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
1569 unsigned long align;
1570
1571 skb = dev_alloc_skb(bp->rx_buf_size);
1572 if (skb == NULL) {
1573 return -ENOMEM;
1574 }
1575
1576 if (unlikely((align = (unsigned long) skb->data & 0x7))) {
1577 skb_reserve(skb, 8 - align);
1578 }
1579
1580 skb->dev = bp->dev;
1581 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1582 PCI_DMA_FROMDEVICE);
1583
1584 rx_buf->skb = skb;
1585 pci_unmap_addr_set(rx_buf, mapping, mapping);
1586
1587 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
1588 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
1589
1590 bp->rx_prod_bseq += bp->rx_buf_use_size;
1591
1592 return 0;
1593 }
1594
1595 static void
1596 bnx2_phy_int(struct bnx2 *bp)
1597 {
1598 u32 new_link_state, old_link_state;
1599
1600 new_link_state = bp->status_blk->status_attn_bits &
1601 STATUS_ATTN_BITS_LINK_STATE;
1602 old_link_state = bp->status_blk->status_attn_bits_ack &
1603 STATUS_ATTN_BITS_LINK_STATE;
1604 if (new_link_state != old_link_state) {
1605 if (new_link_state) {
1606 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1607 STATUS_ATTN_BITS_LINK_STATE);
1608 }
1609 else {
1610 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1611 STATUS_ATTN_BITS_LINK_STATE);
1612 }
1613 bnx2_set_link(bp);
1614 }
1615 }
1616
1617 static void
1618 bnx2_tx_int(struct bnx2 *bp)
1619 {
1620 struct status_block *sblk = bp->status_blk;
1621 u16 hw_cons, sw_cons, sw_ring_cons;
1622 int tx_free_bd = 0;
1623
1624 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
1625 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1626 hw_cons++;
1627 }
1628 sw_cons = bp->tx_cons;
1629
1630 while (sw_cons != hw_cons) {
1631 struct sw_bd *tx_buf;
1632 struct sk_buff *skb;
1633 int i, last;
1634
1635 sw_ring_cons = TX_RING_IDX(sw_cons);
1636
1637 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
1638 skb = tx_buf->skb;
1639 #ifdef BCM_TSO
1640 /* partial BD completions possible with TSO packets */
1641 if (skb_shinfo(skb)->tso_size) {
1642 u16 last_idx, last_ring_idx;
1643
1644 last_idx = sw_cons +
1645 skb_shinfo(skb)->nr_frags + 1;
1646 last_ring_idx = sw_ring_cons +
1647 skb_shinfo(skb)->nr_frags + 1;
1648 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
1649 last_idx++;
1650 }
1651 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
1652 break;
1653 }
1654 }
1655 #endif
1656 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
1657 skb_headlen(skb), PCI_DMA_TODEVICE);
1658
1659 tx_buf->skb = NULL;
1660 last = skb_shinfo(skb)->nr_frags;
1661
1662 for (i = 0; i < last; i++) {
1663 sw_cons = NEXT_TX_BD(sw_cons);
1664
1665 pci_unmap_page(bp->pdev,
1666 pci_unmap_addr(
1667 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
1668 mapping),
1669 skb_shinfo(skb)->frags[i].size,
1670 PCI_DMA_TODEVICE);
1671 }
1672
1673 sw_cons = NEXT_TX_BD(sw_cons);
1674
1675 tx_free_bd += last + 1;
1676
1677 dev_kfree_skb_irq(skb);
1678
1679 hw_cons = bp->hw_tx_cons =
1680 sblk->status_tx_quick_consumer_index0;
1681
1682 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1683 hw_cons++;
1684 }
1685 }
1686
1687 bp->tx_cons = sw_cons;
1688
1689 if (unlikely(netif_queue_stopped(bp->dev))) {
1690 spin_lock(&bp->tx_lock);
1691 if ((netif_queue_stopped(bp->dev)) &&
1692 (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)) {
1693
1694 netif_wake_queue(bp->dev);
1695 }
1696 spin_unlock(&bp->tx_lock);
1697 }
1698 }
1699
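/* Recycle an RX buffer: move the skb and its DMA mapping from the
 * consumer slot to the producer slot instead of reallocating.
 */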
1700 static inline void
1701 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
1702 u16 cons, u16 prod)
1703 {
1704 struct sw_bd *cons_rx_buf, *prod_rx_buf;
1705 struct rx_bd *cons_bd, *prod_bd;
1706
1707 cons_rx_buf = &bp->rx_buf_ring[cons];
1708 prod_rx_buf = &bp->rx_buf_ring[prod];
1709
1710 pci_dma_sync_single_for_device(bp->pdev,
1711 pci_unmap_addr(cons_rx_buf, mapping),
1712 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1713
1714 bp->rx_prod_bseq += bp->rx_buf_use_size;
1715
1716 prod_rx_buf->skb = skb;
1717
1718 if (cons == prod)
1719 return;
1720
1721 pci_unmap_addr_set(prod_rx_buf, mapping,
1722 pci_unmap_addr(cons_rx_buf, mapping));
1723
1724 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
1725 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1726 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
1727 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
1728 }
1729
1730 static int
1731 bnx2_rx_int(struct bnx2 *bp, int budget)
1732 {
1733 struct status_block *sblk = bp->status_blk;
1734 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
1735 struct l2_fhdr *rx_hdr;
1736 int rx_pkt = 0;
1737
1738 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
1739 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
1740 hw_cons++;
1741 }
1742 sw_cons = bp->rx_cons;
1743 sw_prod = bp->rx_prod;
1744
1745 /* Memory barrier necessary as speculative reads of the rx
1746 * buffer can be ahead of the index in the status block
1747 */
1748 rmb();
1749 while (sw_cons != hw_cons) {
1750 unsigned int len;
1751 u32 status;
1752 struct sw_bd *rx_buf;
1753 struct sk_buff *skb;
1754 dma_addr_t dma_addr;
1755
1756 sw_ring_cons = RX_RING_IDX(sw_cons);
1757 sw_ring_prod = RX_RING_IDX(sw_prod);
1758
1759 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
1760 skb = rx_buf->skb;
1761
1762 rx_buf->skb = NULL;
1763
1764 dma_addr = pci_unmap_addr(rx_buf, mapping);
1765
1766 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
1767 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1768
1769 rx_hdr = (struct l2_fhdr *) skb->data;
1770 len = rx_hdr->l2_fhdr_pkt_len - 4;
1771
1772 if ((status = rx_hdr->l2_fhdr_status) &
1773 (L2_FHDR_ERRORS_BAD_CRC |
1774 L2_FHDR_ERRORS_PHY_DECODE |
1775 L2_FHDR_ERRORS_ALIGNMENT |
1776 L2_FHDR_ERRORS_TOO_SHORT |
1777 L2_FHDR_ERRORS_GIANT_FRAME)) {
1778
1779 goto reuse_rx;
1780 }
1781
1782 /* Since we don't have a jumbo ring, copy small packets
1783 * if mtu > 1500
1784 */
1785 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
1786 struct sk_buff *new_skb;
1787
1788 new_skb = dev_alloc_skb(len + 2);
1789 if (new_skb == NULL)
1790 goto reuse_rx;
1791
1792 /* aligned copy */
1793 memcpy(new_skb->data,
1794 skb->data + bp->rx_offset - 2,
1795 len + 2);
1796
1797 skb_reserve(new_skb, 2);
1798 skb_put(new_skb, len);
1799 new_skb->dev = bp->dev;
1800
1801 bnx2_reuse_rx_skb(bp, skb,
1802 sw_ring_cons, sw_ring_prod);
1803
1804 skb = new_skb;
1805 }
1806 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
1807 pci_unmap_single(bp->pdev, dma_addr,
1808 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1809
1810 skb_reserve(skb, bp->rx_offset);
1811 skb_put(skb, len);
1812 }
1813 else {
1814 reuse_rx:
1815 bnx2_reuse_rx_skb(bp, skb,
1816 sw_ring_cons, sw_ring_prod);
1817 goto next_rx;
1818 }
1819
1820 skb->protocol = eth_type_trans(skb, bp->dev);
1821
1822 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
1823 (htons(skb->protocol) != 0x8100)) {
1824
1825 dev_kfree_skb_irq(skb);
1826 goto next_rx;
1827
1828 }
1829
1830 skb->ip_summed = CHECKSUM_NONE;
1831 if (bp->rx_csum &&
1832 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
1833 L2_FHDR_STATUS_UDP_DATAGRAM))) {
1834
1835 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
1836 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
1837 skb->ip_summed = CHECKSUM_UNNECESSARY;
1838 }
1839
1840 #ifdef BCM_VLAN
1841 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
1842 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1843 rx_hdr->l2_fhdr_vlan_tag);
1844 }
1845 else
1846 #endif
1847 netif_receive_skb(skb);
1848
1849 bp->dev->last_rx = jiffies;
1850 rx_pkt++;
1851
1852 next_rx:
1853 sw_cons = NEXT_RX_BD(sw_cons);
1854 sw_prod = NEXT_RX_BD(sw_prod);
1855
1856 if (rx_pkt == budget)
1857 break;
1858
1859 /* Refresh hw_cons to see if there is new work */
1860 if (sw_cons == hw_cons) {
1861 hw_cons = bp->hw_rx_cons =
1862 sblk->status_rx_quick_consumer_index0;
1863 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
1864 hw_cons++;
1865 rmb();
1866 }
1867 }
1868 bp->rx_cons = sw_cons;
1869 bp->rx_prod = sw_prod;
1870
1871 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
1872
1873 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
1874
1875 mmiowb();
1876
1877 return rx_pkt;
1878
1879 }
1880
1881 /* MSI ISR - The only difference between this and the INTx ISR
1882 * is that the MSI interrupt is always serviced.
1883 */
1884 static irqreturn_t
1885 bnx2_msi(int irq, void *dev_instance, struct pt_regs *regs)
1886 {
1887 struct net_device *dev = dev_instance;
1888 struct bnx2 *bp = netdev_priv(dev);
1889
1890 prefetch(bp->status_blk);
1891 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1892 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1893 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1894
1895 /* Return here if interrupt is disabled. */
1896 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1897 return IRQ_HANDLED;
1898
1899 netif_rx_schedule(dev);
1900
1901 return IRQ_HANDLED;
1902 }
1903
1904 static irqreturn_t
1905 bnx2_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
1906 {
1907 struct net_device *dev = dev_instance;
1908 struct bnx2 *bp = netdev_priv(dev);
1909
1910 /* When using INTx, it is possible for the interrupt to arrive
1911 * at the CPU before the status block posted prior to the
1912 * interrupt. Reading a register will flush the status block.
1913 * When using MSI, the MSI message will always complete after
1914 * the status block write.
1915 */
1916 if ((bp->status_blk->status_idx == bp->last_status_idx) &&
1917 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
1918 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
1919 return IRQ_NONE;
1920
1921 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1922 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1923 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1924
1925 /* Return here if interrupt is shared and is disabled. */
1926 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1927 return IRQ_HANDLED;
1928
1929 netif_rx_schedule(dev);
1930
1931 return IRQ_HANDLED;
1932 }
1933
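/* Check the status block for pending RX/TX completions or a link
 * state change.
 */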
1934 static inline int
1935 bnx2_has_work(struct bnx2 *bp)
1936 {
1937 struct status_block *sblk = bp->status_blk;
1938
1939 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
1940 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
1941 return 1;
1942
1943 if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
1944 bp->link_up)
1945 return 1;
1946
1947 return 0;
1948 }
1949
1950 static int
1951 bnx2_poll(struct net_device *dev, int *budget)
1952 {
1953 struct bnx2 *bp = netdev_priv(dev);
1954
1955 if ((bp->status_blk->status_attn_bits &
1956 STATUS_ATTN_BITS_LINK_STATE) !=
1957 (bp->status_blk->status_attn_bits_ack &
1958 STATUS_ATTN_BITS_LINK_STATE)) {
1959
1960 spin_lock(&bp->phy_lock);
1961 bnx2_phy_int(bp);
1962 spin_unlock(&bp->phy_lock);
1963
1964 /* This is needed to take care of transient status
1965 * during link changes.
1966 */
1967 REG_WR(bp, BNX2_HC_COMMAND,
1968 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
1969 REG_RD(bp, BNX2_HC_COMMAND);
1970 }
1971
1972 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
1973 bnx2_tx_int(bp);
1974
1975 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
1976 int orig_budget = *budget;
1977 int work_done;
1978
1979 if (orig_budget > dev->quota)
1980 orig_budget = dev->quota;
1981
1982 work_done = bnx2_rx_int(bp, orig_budget);
1983 *budget -= work_done;
1984 dev->quota -= work_done;
1985 }
1986
1987 bp->last_status_idx = bp->status_blk->status_idx;
1988 rmb();
1989
1990 if (!bnx2_has_work(bp)) {
1991 netif_rx_complete(dev);
1992 if (likely(bp->flags & USING_MSI_FLAG)) {
1993 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1994 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
1995 bp->last_status_idx);
1996 return 0;
1997 }
1998 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1999 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2000 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2001 bp->last_status_idx);
2002
2003 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2004 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2005 bp->last_status_idx);
2006 return 0;
2007 }
2008
2009 return 1;
2010 }
2011
2012 /* Called with rtnl_lock from vlan functions and also dev->xmit_lock
2013 * from set_multicast.
2014 */
2015 static void
2016 bnx2_set_rx_mode(struct net_device *dev)
2017 {
2018 struct bnx2 *bp = netdev_priv(dev);
2019 u32 rx_mode, sort_mode;
2020 int i;
2021
2022 spin_lock_bh(&bp->phy_lock);
2023
2024 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2025 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2026 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2027 #ifdef BCM_VLAN
2028 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
2029 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2030 #else
2031 if (!(bp->flags & ASF_ENABLE_FLAG))
2032 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2033 #endif
2034 if (dev->flags & IFF_PROMISC) {
2035 /* Promiscuous mode. */
2036 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2037 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN;
2038 }
2039 else if (dev->flags & IFF_ALLMULTI) {
2040 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2041 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2042 0xffffffff);
2043 }
2044 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2045 }
2046 else {
2047 /* Accept one or more multicast(s). */
2048 struct dev_mc_list *mclist;
2049 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2050 u32 regidx;
2051 u32 bit;
2052 u32 crc;
2053
2054 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2055
2056 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2057 i++, mclist = mclist->next) {
2058
2059 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2060 bit = crc & 0xff;
2061 regidx = (bit & 0xe0) >> 5;
2062 bit &= 0x1f;
2063 mc_filter[regidx] |= (1 << bit);
2064 }
2065
2066 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2067 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2068 mc_filter[i]);
2069 }
2070
2071 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2072 }
2073
2074 if (rx_mode != bp->rx_mode) {
2075 bp->rx_mode = rx_mode;
2076 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2077 }
2078
2079 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2080 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2081 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2082
2083 spin_unlock_bh(&bp->phy_lock);
2084 }
2085
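/* Load RV2P microcode two 32-bit words (one instruction) at a time,
 * then hold the selected processor in reset until it is un-stalled.
 */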
2086 static void
2087 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2088 u32 rv2p_proc)
2089 {
2090 int i;
2091 u32 val;
2092
2093
2094 for (i = 0; i < rv2p_code_len; i += 8) {
2095 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, *rv2p_code);
2096 rv2p_code++;
2097 REG_WR(bp, BNX2_RV2P_INSTR_LOW, *rv2p_code);
2098 rv2p_code++;
2099
2100 if (rv2p_proc == RV2P_PROC1) {
2101 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2102 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2103 }
2104 else {
2105 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2106 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2107 }
2108 }
2109
2110 /* Reset the processor; it is un-stalled later. */
2111 if (rv2p_proc == RV2P_PROC1) {
2112 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2113 }
2114 else {
2115 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2116 }
2117 }
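
/* Each RV2P instruction is 64 bits wide: the high and low halves are
 * staged in BNX2_RV2P_INSTR_HIGH/LOW and then committed to
 * instruction word i/8 by the PROCx_ADDR_CMD write with the RDWR bit
 * set.
 */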
2118
2119 static void
2120 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2121 {
2122 u32 offset;
2123 u32 val;
2124
2125 /* Halt the CPU. */
2126 val = REG_RD_IND(bp, cpu_reg->mode);
2127 val |= cpu_reg->mode_value_halt;
2128 REG_WR_IND(bp, cpu_reg->mode, val);
2129 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2130
2131 /* Load the Text area. */
2132 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2133 if (fw->text) {
2134 int j;
2135
2136 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2137 REG_WR_IND(bp, offset, fw->text[j]);
2138 }
2139 }
2140
2141 /* Load the Data area. */
2142 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2143 if (fw->data) {
2144 int j;
2145
2146 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2147 REG_WR_IND(bp, offset, fw->data[j]);
2148 }
2149 }
2150
2151 /* Load the SBSS area. */
2152 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2153 if (fw->sbss) {
2154 int j;
2155
2156 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2157 REG_WR_IND(bp, offset, fw->sbss[j]);
2158 }
2159 }
2160
2161 /* Load the BSS area. */
2162 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2163 if (fw->bss) {
2164 int j;
2165
2166 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2167 REG_WR_IND(bp, offset, fw->bss[j]);
2168 }
2169 }
2170
2171 /* Load the Read-Only area. */
2172 offset = cpu_reg->spad_base +
2173 (fw->rodata_addr - cpu_reg->mips_view_base);
2174 if (fw->rodata) {
2175 int j;
2176
2177 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2178 REG_WR_IND(bp, offset, fw->rodata[j]);
2179 }
2180 }
2181
2182 /* Clear the pre-fetch instruction. */
2183 REG_WR_IND(bp, cpu_reg->inst, 0);
2184 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2185
2186 /* Start the CPU. */
2187 val = REG_RD_IND(bp, cpu_reg->mode);
2188 val &= ~cpu_reg->mode_value_halt;
2189 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2190 REG_WR_IND(bp, cpu_reg->mode, val);
2191 }
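
/* The firmware sections are linked at the on-chip CPU's own view of
 * memory (based at mips_view_base, 0x8000000 here), while the host
 * reaches that memory through the processor's scratchpad window.
 * Each section is therefore copied to
 * spad_base + (section_addr - mips_view_base), one 32-bit word at a
 * time via the indirect register interface.
 */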
2192
2193 static void
2194 bnx2_init_cpus(struct bnx2 *bp)
2195 {
2196 struct cpu_reg cpu_reg;
2197 struct fw_info fw;
2198
2199 /* Initialize the RV2P processor. */
2200 load_rv2p_fw(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), RV2P_PROC1);
2201 load_rv2p_fw(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), RV2P_PROC2);
2202
2203 /* Initialize the RX Processor. */
2204 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2205 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2206 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2207 cpu_reg.state = BNX2_RXP_CPU_STATE;
2208 cpu_reg.state_value_clear = 0xffffff;
2209 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2210 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2211 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2212 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2213 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2214 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2215 cpu_reg.mips_view_base = 0x8000000;
2216
2217 fw.ver_major = bnx2_RXP_b06FwReleaseMajor;
2218 fw.ver_minor = bnx2_RXP_b06FwReleaseMinor;
2219 fw.ver_fix = bnx2_RXP_b06FwReleaseFix;
2220 fw.start_addr = bnx2_RXP_b06FwStartAddr;
2221
2222 fw.text_addr = bnx2_RXP_b06FwTextAddr;
2223 fw.text_len = bnx2_RXP_b06FwTextLen;
2224 fw.text_index = 0;
2225 fw.text = bnx2_RXP_b06FwText;
2226
2227 fw.data_addr = bnx2_RXP_b06FwDataAddr;
2228 fw.data_len = bnx2_RXP_b06FwDataLen;
2229 fw.data_index = 0;
2230 fw.data = bnx2_RXP_b06FwData;
2231
2232 fw.sbss_addr = bnx2_RXP_b06FwSbssAddr;
2233 fw.sbss_len = bnx2_RXP_b06FwSbssLen;
2234 fw.sbss_index = 0;
2235 fw.sbss = bnx2_RXP_b06FwSbss;
2236
2237 fw.bss_addr = bnx2_RXP_b06FwBssAddr;
2238 fw.bss_len = bnx2_RXP_b06FwBssLen;
2239 fw.bss_index = 0;
2240 fw.bss = bnx2_RXP_b06FwBss;
2241
2242 fw.rodata_addr = bnx2_RXP_b06FwRodataAddr;
2243 fw.rodata_len = bnx2_RXP_b06FwRodataLen;
2244 fw.rodata_index = 0;
2245 fw.rodata = bnx2_RXP_b06FwRodata;
2246
2247 load_cpu_fw(bp, &cpu_reg, &fw);
2248
2249 /* Initialize the TX Processor. */
2250 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2251 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2252 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2253 cpu_reg.state = BNX2_TXP_CPU_STATE;
2254 cpu_reg.state_value_clear = 0xffffff;
2255 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2256 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2257 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2258 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2259 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2260 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2261 cpu_reg.mips_view_base = 0x8000000;
2262
2263 fw.ver_major = bnx2_TXP_b06FwReleaseMajor;
2264 fw.ver_minor = bnx2_TXP_b06FwReleaseMinor;
2265 fw.ver_fix = bnx2_TXP_b06FwReleaseFix;
2266 fw.start_addr = bnx2_TXP_b06FwStartAddr;
2267
2268 fw.text_addr = bnx2_TXP_b06FwTextAddr;
2269 fw.text_len = bnx2_TXP_b06FwTextLen;
2270 fw.text_index = 0;
2271 fw.text = bnx2_TXP_b06FwText;
2272
2273 fw.data_addr = bnx2_TXP_b06FwDataAddr;
2274 fw.data_len = bnx2_TXP_b06FwDataLen;
2275 fw.data_index = 0;
2276 fw.data = bnx2_TXP_b06FwData;
2277
2278 fw.sbss_addr = bnx2_TXP_b06FwSbssAddr;
2279 fw.sbss_len = bnx2_TXP_b06FwSbssLen;
2280 fw.sbss_index = 0;
2281 fw.sbss = bnx2_TXP_b06FwSbss;
2282
2283 fw.bss_addr = bnx2_TXP_b06FwBssAddr;
2284 fw.bss_len = bnx2_TXP_b06FwBssLen;
2285 fw.bss_index = 0;
2286 fw.bss = bnx2_TXP_b06FwBss;
2287
2288 fw.rodata_addr = bnx2_TXP_b06FwRodataAddr;
2289 fw.rodata_len = bnx2_TXP_b06FwRodataLen;
2290 fw.rodata_index = 0;
2291 fw.rodata = bnx2_TXP_b06FwRodata;
2292
2293 load_cpu_fw(bp, &cpu_reg, &fw);
2294
2295 /* Initialize the TX Patch-up Processor. */
2296 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2297 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2298 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2299 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2300 cpu_reg.state_value_clear = 0xffffff;
2301 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2302 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2303 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2304 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2305 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2306 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2307 cpu_reg.mips_view_base = 0x8000000;
2308
2309 fw.ver_major = bnx2_TPAT_b06FwReleaseMajor;
2310 fw.ver_minor = bnx2_TPAT_b06FwReleaseMinor;
2311 fw.ver_fix = bnx2_TPAT_b06FwReleaseFix;
2312 fw.start_addr = bnx2_TPAT_b06FwStartAddr;
2313
2314 fw.text_addr = bnx2_TPAT_b06FwTextAddr;
2315 fw.text_len = bnx2_TPAT_b06FwTextLen;
2316 fw.text_index = 0;
2317 fw.text = bnx2_TPAT_b06FwText;
2318
2319 fw.data_addr = bnx2_TPAT_b06FwDataAddr;
2320 fw.data_len = bnx2_TPAT_b06FwDataLen;
2321 fw.data_index = 0;
2322 fw.data = bnx2_TPAT_b06FwData;
2323
2324 fw.sbss_addr = bnx2_TPAT_b06FwSbssAddr;
2325 fw.sbss_len = bnx2_TPAT_b06FwSbssLen;
2326 fw.sbss_index = 0;
2327 fw.sbss = bnx2_TPAT_b06FwSbss;
2328
2329 fw.bss_addr = bnx2_TPAT_b06FwBssAddr;
2330 fw.bss_len = bnx2_TPAT_b06FwBssLen;
2331 fw.bss_index = 0;
2332 fw.bss = bnx2_TPAT_b06FwBss;
2333
2334 fw.rodata_addr = bnx2_TPAT_b06FwRodataAddr;
2335 fw.rodata_len = bnx2_TPAT_b06FwRodataLen;
2336 fw.rodata_index = 0;
2337 fw.rodata = bnx2_TPAT_b06FwRodata;
2338
2339 load_cpu_fw(bp, &cpu_reg, &fw);
2340
2341 /* Initialize the Completion Processor. */
2342 cpu_reg.mode = BNX2_COM_CPU_MODE;
2343 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2344 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2345 cpu_reg.state = BNX2_COM_CPU_STATE;
2346 cpu_reg.state_value_clear = 0xffffff;
2347 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2348 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2349 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2350 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2351 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2352 cpu_reg.spad_base = BNX2_COM_SCRATCH;
2353 cpu_reg.mips_view_base = 0x8000000;
2354
2355 fw.ver_major = bnx2_COM_b06FwReleaseMajor;
2356 fw.ver_minor = bnx2_COM_b06FwReleaseMinor;
2357 fw.ver_fix = bnx2_COM_b06FwReleaseFix;
2358 fw.start_addr = bnx2_COM_b06FwStartAddr;
2359
2360 fw.text_addr = bnx2_COM_b06FwTextAddr;
2361 fw.text_len = bnx2_COM_b06FwTextLen;
2362 fw.text_index = 0;
2363 fw.text = bnx2_COM_b06FwText;
2364
2365 fw.data_addr = bnx2_COM_b06FwDataAddr;
2366 fw.data_len = bnx2_COM_b06FwDataLen;
2367 fw.data_index = 0;
2368 fw.data = bnx2_COM_b06FwData;
2369
2370 fw.sbss_addr = bnx2_COM_b06FwSbssAddr;
2371 fw.sbss_len = bnx2_COM_b06FwSbssLen;
2372 fw.sbss_index = 0;
2373 fw.sbss = bnx2_COM_b06FwSbss;
2374
2375 fw.bss_addr = bnx2_COM_b06FwBssAddr;
2376 fw.bss_len = bnx2_COM_b06FwBssLen;
2377 fw.bss_index = 0;
2378 fw.bss = bnx2_COM_b06FwBss;
2379
2380 fw.rodata_addr = bnx2_COM_b06FwRodataAddr;
2381 fw.rodata_len = bnx2_COM_b06FwRodataLen;
2382 fw.rodata_index = 0;
2383 fw.rodata = bnx2_COM_b06FwRodata;
2384
2385 load_cpu_fw(bp, &cpu_reg, &fw);
2386
2387 }
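
/* At this point all five on-chip processors are loaded: the two RV2P
 * engines plus the RX, TX, TX patch-up and completion CPUs, each
 * from its own set of section arrays in bnx2_fw.h.  load_cpu_fw()
 * releases the CPUs from halt; the RV2P engines stay in reset until
 * they are un-stalled later.
 */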
2388
2389 static int
2390 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
2391 {
2392 u16 pmcsr;
2393
2394 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2395
2396 switch (state) {
2397 case PCI_D0: {
2398 u32 val;
2399
2400 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2401 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2402 PCI_PM_CTRL_PME_STATUS);
2403
2404 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2405 /* delay required during transition out of D3hot */
2406 msleep(20);
2407
2408 val = REG_RD(bp, BNX2_EMAC_MODE);
2409 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
2410 val &= ~BNX2_EMAC_MODE_MPKT;
2411 REG_WR(bp, BNX2_EMAC_MODE, val);
2412
2413 val = REG_RD(bp, BNX2_RPM_CONFIG);
2414 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2415 REG_WR(bp, BNX2_RPM_CONFIG, val);
2416 break;
2417 }
2418 case PCI_D3hot: {
2419 int i;
2420 u32 val, wol_msg;
2421
2422 if (bp->wol) {
2423 u32 advertising;
2424 u8 autoneg;
2425
2426 autoneg = bp->autoneg;
2427 advertising = bp->advertising;
2428
2429 bp->autoneg = AUTONEG_SPEED;
2430 bp->advertising = ADVERTISED_10baseT_Half |
2431 ADVERTISED_10baseT_Full |
2432 ADVERTISED_100baseT_Half |
2433 ADVERTISED_100baseT_Full |
2434 ADVERTISED_Autoneg;
2435
2436 bnx2_setup_copper_phy(bp);
2437
2438 bp->autoneg = autoneg;
2439 bp->advertising = advertising;
2440
2441 bnx2_set_mac_addr(bp);
2442
2443 val = REG_RD(bp, BNX2_EMAC_MODE);
2444
2445 /* Enable port mode. */
2446 val &= ~BNX2_EMAC_MODE_PORT;
2447 val |= BNX2_EMAC_MODE_PORT_MII |
2448 BNX2_EMAC_MODE_MPKT_RCVD |
2449 BNX2_EMAC_MODE_ACPI_RCVD |
2450 BNX2_EMAC_MODE_MPKT;
2451
2452 REG_WR(bp, BNX2_EMAC_MODE, val);
2453
2454 /* receive all multicast */
2455 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2456 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2457 0xffffffff);
2458 }
2459 REG_WR(bp, BNX2_EMAC_RX_MODE,
2460 BNX2_EMAC_RX_MODE_SORT_MODE);
2461
2462 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
2463 BNX2_RPM_SORT_USER0_MC_EN;
2464 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2465 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
2466 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
2467 BNX2_RPM_SORT_USER0_ENA);
2468
2469 /* Need to enable EMAC and RPM for WOL. */
2470 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2471 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
2472 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
2473 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
2474
2475 val = REG_RD(bp, BNX2_RPM_CONFIG);
2476 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2477 REG_WR(bp, BNX2_RPM_CONFIG, val);
2478
2479 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
2480 }
2481 else {
2482 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
2483 }
2484
2485 if (!(bp->flags & NO_WOL_FLAG))
2486 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
2487
2488 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2489 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
2490 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
2491
2492 if (bp->wol)
2493 pmcsr |= 3;
2494 }
2495 else {
2496 pmcsr |= 3;
2497 }
2498 if (bp->wol) {
2499 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2500 }
2501 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2502 pmcsr);
2503
2504 /* No more memory access after this point until
2505 * device is brought back to D0.
2506 */
2507 udelay(50);
2508 break;
2509 }
2510 default:
2511 return -EINVAL;
2512 }
2513 return 0;
2514 }
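
/* Notes on the D3hot path above: the copper PHY is re-negotiated at
 * 10/100 only (a lower-power link is enough to receive wake-up
 * frames), the MAC is set to pass magic-packet and ACPI patterns
 * with broadcast and all-multicast sorting, and the EMAC/RPM blocks
 * are left enabled before the firmware is told whether to expect
 * WOL.  On 5706 A0/A1 the PMCSR is moved to D3hot (state value 3)
 * only when WOL is enabled; otherwise those chips stay in D0.  PME
 * is armed only in the WOL case.
 */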
2515
2516 static int
2517 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2518 {
2519 u32 val;
2520 int j;
2521
2522 /* Request access to the flash interface. */
2523 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2524 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2525 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2526 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2527 break;
2528
2529 udelay(5);
2530 }
2531
2532 if (j >= NVRAM_TIMEOUT_COUNT)
2533 return -EBUSY;
2534
2535 return 0;
2536 }
2537
2538 static int
2539 bnx2_release_nvram_lock(struct bnx2 *bp)
2540 {
2541 int j;
2542 u32 val;
2543
2544 /* Relinquish nvram interface. */
2545 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2546
2547 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2548 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2549 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2550 break;
2551
2552 udelay(5);
2553 }
2554
2555 if (j >= NVRAM_TIMEOUT_COUNT)
2556 return -EBUSY;
2557
2558 return 0;
2559 }
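
/* The NVM arbiter is shared with the bootcode: ARB_REQ_SET2/CLR2
 * raise and drop this driver's request on arbitration slot 2, and
 * ARB_ARB2 reports the grant.  Both helpers poll for up to
 * NVRAM_TIMEOUT_COUNT * 5 us before giving up with -EBUSY.
 */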
2560
2561
2562 static int
2563 bnx2_enable_nvram_write(struct bnx2 *bp)
2564 {
2565 u32 val;
2566
2567 val = REG_RD(bp, BNX2_MISC_CFG);
2568 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2569
2570 if (!bp->flash_info->buffered) {
2571 int j;
2572
2573 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2574 REG_WR(bp, BNX2_NVM_COMMAND,
2575 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2576
2577 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2578 udelay(5);
2579
2580 val = REG_RD(bp, BNX2_NVM_COMMAND);
2581 if (val & BNX2_NVM_COMMAND_DONE)
2582 break;
2583 }
2584
2585 if (j >= NVRAM_TIMEOUT_COUNT)
2586 return -EBUSY;
2587 }
2588 return 0;
2589 }
2590
2591 static void
2592 bnx2_disable_nvram_write(struct bnx2 *bp)
2593 {
2594 u32 val;
2595
2596 val = REG_RD(bp, BNX2_MISC_CFG);
2597 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2598 }
2599
2600
2601 static void
2602 bnx2_enable_nvram_access(struct bnx2 *bp)
2603 {
2604 u32 val;
2605
2606 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2607 /* Enable both bits, even on read. */
2608 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2609 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2610 }
2611
2612 static void
2613 bnx2_disable_nvram_access(struct bnx2 *bp)
2614 {
2615 u32 val;
2616
2617 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2618 /* Disable both bits, even after read. */
2619 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2620 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2621 BNX2_NVM_ACCESS_ENABLE_WR_EN));
2622 }
2623
2624 static int
2625 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2626 {
2627 u32 cmd;
2628 int j;
2629
2630 if (bp->flash_info->buffered)
2631 /* Buffered flash, no erase needed */
2632 return 0;
2633
2634 /* Build an erase command */
2635 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2636 BNX2_NVM_COMMAND_DOIT;
2637
2638 /* Need to clear DONE bit separately. */
2639 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2640
2642 /* Address of the NVRAM page to erase. */
2642 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2643
2644 /* Issue an erase command. */
2645 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2646
2647 /* Wait for completion. */
2648 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2649 u32 val;
2650
2651 udelay(5);
2652
2653 val = REG_RD(bp, BNX2_NVM_COMMAND);
2654 if (val & BNX2_NVM_COMMAND_DONE)
2655 break;
2656 }
2657
2658 if (j >= NVRAM_TIMEOUT_COUNT)
2659 return -EBUSY;
2660
2661 return 0;
2662 }
2663
2664 static int
2665 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
2666 {
2667 u32 cmd;
2668 int j;
2669
2670 /* Build the command word. */
2671 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
2672
2673 /* Translate the offset into a buffered-flash physical address. */
2674 if (bp->flash_info->buffered) {
2675 offset = ((offset / bp->flash_info->page_size) <<
2676 bp->flash_info->page_bits) +
2677 (offset % bp->flash_info->page_size);
2678 }
2679
2680 /* Need to clear DONE bit separately. */
2681 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2682
2683 /* Address of the NVRAM to read from. */
2684 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2685
2686 /* Issue a read command. */
2687 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2688
2689 /* Wait for completion. */
2690 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2691 u32 val;
2692
2693 udelay(5);
2694
2695 val = REG_RD(bp, BNX2_NVM_COMMAND);
2696 if (val & BNX2_NVM_COMMAND_DONE) {
2697 val = REG_RD(bp, BNX2_NVM_READ);
2698
2699 val = be32_to_cpu(val);
2700 memcpy(ret_val, &val, 4);
2701 break;
2702 }
2703 }
2704 if (j >= NVRAM_TIMEOUT_COUNT)
2705 return -EBUSY;
2706
2707 return 0;
2708 }
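
/* Buffered flash parts are addressed by page rather than linearly,
 * so the offset is split into (page number << page_bits) plus the
 * remainder within the page.  With the 264-byte pages used by the
 * buffered parts this driver supports (page_bits of 9, e.g.), offset
 * 1000 maps to page 3, byte 208, i.e. (3 << 9) + 208 = 1744.
 */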
2709
2710
2711 static int
2712 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2713 {
2714 u32 cmd, val32;
2715 int j;
2716
2717 /* Build the command word. */
2718 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2719
2720 /* Translate the offset into a buffered-flash physical address. */
2721 if (bp->flash_info->buffered) {
2722 offset = ((offset / bp->flash_info->page_size) <<
2723 bp->flash_info->page_bits) +
2724 (offset % bp->flash_info->page_size);
2725 }
2726
2727 /* Need to clear DONE bit separately. */
2728 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2729
2730 memcpy(&val32, val, 4);
2731 val32 = cpu_to_be32(val32);
2732
2733 /* Write the data. */
2734 REG_WR(bp, BNX2_NVM_WRITE, val32);
2735
2736 /* Address of the NVRAM to write to. */
2737 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2738
2739 /* Issue the write command. */
2740 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2741
2742 /* Wait for completion. */
2743 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2744 udelay(5);
2745
2746 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2747 break;
2748 }
2749 if (j >= NVRAM_TIMEOUT_COUNT)
2750 return -EBUSY;
2751
2752 return 0;
2753 }
2754
2755 static int
2756 bnx2_init_nvram(struct bnx2 *bp)
2757 {
2758 u32 val;
2759 int j, entry_count, rc;
2760 struct flash_spec *flash;
2761
2762 /* Determine the selected interface. */
2763 val = REG_RD(bp, BNX2_NVM_CFG1);
2764
2765 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
2766
2767 rc = 0;
2768 if (val & 0x40000000) {
2769
2770 /* Flash interface has been reconfigured */
2771 for (j = 0, flash = &flash_table[0]; j < entry_count;
2772 j++, flash++) {
2773 if ((val & FLASH_BACKUP_STRAP_MASK) ==
2774 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
2775 bp->flash_info = flash;
2776 break;
2777 }
2778 }
2779 }
2780 else {
2781 u32 mask;
2782 /* Flash interface has not yet been reconfigured */
2783
2784 if (val & (1 << 23))
2785 mask = FLASH_BACKUP_STRAP_MASK;
2786 else
2787 mask = FLASH_STRAP_MASK;
2788
2789 for (j = 0, flash = &flash_table[0]; j < entry_count;
2790 j++, flash++) {
2791
2792 if ((val & mask) == (flash->strapping & mask)) {
2793 bp->flash_info = flash;
2794
2795 /* Request access to the flash interface. */
2796 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2797 return rc;
2798
2799 /* Enable access to flash interface */
2800 bnx2_enable_nvram_access(bp);
2801
2802 /* Reconfigure the flash interface */
2803 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
2804 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
2805 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
2806 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
2807
2808 /* Disable access to flash interface */
2809 bnx2_disable_nvram_access(bp);
2810 bnx2_release_nvram_lock(bp);
2811
2812 break;
2813 }
2814 }
2815 } /* if (val & 0x40000000) */
2816
2817 if (j == entry_count) {
2818 bp->flash_info = NULL;
2819 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
2820 return -ENODEV;
2821 }
2822
2823 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
2824 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
2825 if (val)
2826 bp->flash_size = val;
2827 else
2828 bp->flash_size = bp->flash_info->total_size;
2829
2830 return rc;
2831 }
2832
2833 static int
2834 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2835 int buf_size)
2836 {
2837 int rc = 0;
2838 u32 cmd_flags, offset32, len32, extra;
2839
2840 if (buf_size == 0)
2841 return 0;
2842
2843 /* Request access to the flash interface. */
2844 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2845 return rc;
2846
2847 /* Enable access to flash interface */
2848 bnx2_enable_nvram_access(bp);
2849
2850 len32 = buf_size;
2851 offset32 = offset;
2852 extra = 0;
2853
2854 cmd_flags = 0;
2855
2856 if (offset32 & 3) {
2857 u8 buf[4];
2858 u32 pre_len;
2859
2860 offset32 &= ~3;
2861 pre_len = 4 - (offset & 3);
2862
2863 if (pre_len >= len32) {
2864 pre_len = len32;
2865 cmd_flags = BNX2_NVM_COMMAND_FIRST |
2866 BNX2_NVM_COMMAND_LAST;
2867 }
2868 else {
2869 cmd_flags = BNX2_NVM_COMMAND_FIRST;
2870 }
2871
2872 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2873
2874 if (rc)
2875 return rc;
2876
2877 memcpy(ret_buf, buf + (offset & 3), pre_len);
2878
2879 offset32 += 4;
2880 ret_buf += pre_len;
2881 len32 -= pre_len;
2882 }
2883 if (len32 & 3) {
2884 extra = 4 - (len32 & 3);
2885 len32 = (len32 + 4) & ~3;
2886 }
2887
2888 if (len32 == 4) {
2889 u8 buf[4];
2890
2891 if (cmd_flags)
2892 cmd_flags = BNX2_NVM_COMMAND_LAST;
2893 else
2894 cmd_flags = BNX2_NVM_COMMAND_FIRST |
2895 BNX2_NVM_COMMAND_LAST;
2896
2897 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2898
2899 memcpy(ret_buf, buf, 4 - extra);
2900 }
2901 else if (len32 > 0) {
2902 u8 buf[4];
2903
2904 /* Read the first word. */
2905 if (cmd_flags)
2906 cmd_flags = 0;
2907 else
2908 cmd_flags = BNX2_NVM_COMMAND_FIRST;
2909
2910 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
2911
2912 /* Advance to the next dword. */
2913 offset32 += 4;
2914 ret_buf += 4;
2915 len32 -= 4;
2916
2917 while (len32 > 4 && rc == 0) {
2918 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
2919
2920 /* Advance to the next dword. */
2921 offset32 += 4;
2922 ret_buf += 4;
2923 len32 -= 4;
2924 }
2925
2926 if (rc)
2927 return rc;
2928
2929 cmd_flags = BNX2_NVM_COMMAND_LAST;
2930 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2931
2932 memcpy(ret_buf, buf, 4 - extra);
2933 }
2934
2935 /* Disable access to flash interface */
2936 bnx2_disable_nvram_access(bp);
2937
2938 bnx2_release_nvram_lock(bp);
2939
2940 return rc;
2941 }
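
/* bnx2_nvram_read() handles arbitrary alignment in three stages: a
 * leading partial dword (read whole, copy only the wanted tail), a
 * run of aligned dwords read straight into the caller's buffer, and
 * a trailing dword whose last 'extra' bytes are discarded.  The
 * FIRST/LAST command flags bracket the whole sequence for the flash
 * controller.
 */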
2942
2943 static int
2944 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
2945 int buf_size)
2946 {
2947 u32 written, offset32, len32;
2948 u8 *buf, start[4], end[4];
2949 int rc = 0;
2950 int align_start, align_end;
2951
2952 buf = data_buf;
2953 offset32 = offset;
2954 len32 = buf_size;
2955 align_start = align_end = 0;
2956
2957 if ((align_start = (offset32 & 3))) {
2958 offset32 &= ~3;
2959 len32 += align_start;
2960 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
2961 return rc;
2962 }
2963
2964 if (len32 & 3) {
2965 if ((len32 > 4) || !align_start) {
2966 align_end = 4 - (len32 & 3);
2967 len32 += align_end;
2968 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
2969 end, 4))) {
2970 return rc;
2971 }
2972 }
2973 }
2974
2975 if (align_start || align_end) {
2976 buf = kmalloc(len32, GFP_KERNEL);
2977 if (buf == NULL)
2978 return -ENOMEM;
2979 if (align_start) {
2980 memcpy(buf, start, 4);
2981 }
2982 if (align_end) {
2983 memcpy(buf + len32 - 4, end, 4);
2984 }
2985 memcpy(buf + align_start, data_buf, buf_size);
2986 }
2987
2988 written = 0;
2989 while ((written < len32) && (rc == 0)) {
2990 u32 page_start, page_end, data_start, data_end;
2991 u32 addr, cmd_flags;
2992 int i;
2993 u8 flash_buffer[264];
2994
2995 /* Find the page_start addr */
2996 page_start = offset32 + written;
2997 page_start -= (page_start % bp->flash_info->page_size);
2998 /* Find the page_end addr */
2999 page_end = page_start + bp->flash_info->page_size;
3000 /* Find the data_start addr */
3001 data_start = (written == 0) ? offset32 : page_start;
3002 /* Find the data_end addr */
3003 data_end = (page_end > offset32 + len32) ?
3004 (offset32 + len32) : page_end;
3005
3006 /* Request access to the flash interface. */
3007 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3008 goto nvram_write_end;
3009
3010 /* Enable access to flash interface */
3011 bnx2_enable_nvram_access(bp);
3012
3013 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3014 if (bp->flash_info->buffered == 0) {
3015 int j;
3016
3017 /* Read the whole page into the buffer
3018 * (non-buffered flash only) */
3019 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3020 if (j == (bp->flash_info->page_size - 4)) {
3021 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3022 }
3023 rc = bnx2_nvram_read_dword(bp,
3024 page_start + j,
3025 &flash_buffer[j],
3026 cmd_flags);
3027
3028 if (rc)
3029 goto nvram_write_end;
3030
3031 cmd_flags = 0;
3032 }
3033 }
3034
3035 /* Enable writes to flash interface (unlock write-protect) */
3036 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3037 goto nvram_write_end;
3038
3039 /* Erase the page */
3040 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3041 goto nvram_write_end;
3042
3043 /* Re-enable the write again for the actual write */
3044 bnx2_enable_nvram_write(bp);
3045
3046 /* Loop to write back the buffer data from page_start to
3047 * data_start */
3048 i = 0;
3049 if (bp->flash_info->buffered == 0) {
3050 for (addr = page_start; addr < data_start;
3051 addr += 4, i += 4) {
3052
3053 rc = bnx2_nvram_write_dword(bp, addr,
3054 &flash_buffer[i], cmd_flags);
3055
3056 if (rc != 0)
3057 goto nvram_write_end;
3058
3059 cmd_flags = 0;
3060 }
3061 }
3062
3063 /* Loop to write the new data from data_start to data_end */
3064 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3065 if ((addr == page_end - 4) ||
3066 ((bp->flash_info->buffered) &&
3067 (addr == data_end - 4))) {
3068
3069 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3070 }
3071 rc = bnx2_nvram_write_dword(bp, addr, buf,
3072 cmd_flags);
3073
3074 if (rc != 0)
3075 goto nvram_write_end;
3076
3077 cmd_flags = 0;
3078 buf += 4;
3079 }
3080
3081 /* Loop to write back the buffer data from data_end
3082 * to page_end */
3083 if (bp->flash_info->buffered == 0) {
3084 for (addr = data_end; addr < page_end;
3085 addr += 4, i += 4) {
3086
3087 if (addr == page_end-4) {
3088 cmd_flags = BNX2_NVM_COMMAND_LAST;
3089 }
3090 rc = bnx2_nvram_write_dword(bp, addr,
3091 &flash_buffer[i], cmd_flags);
3092
3093 if (rc != 0)
3094 goto nvram_write_end;
3095
3096 cmd_flags = 0;
3097 }
3098 }
3099
3100 /* Disable writes to flash interface (lock write-protect) */
3101 bnx2_disable_nvram_write(bp);
3102
3103 /* Disable access to flash interface */
3104 bnx2_disable_nvram_access(bp);
3105 bnx2_release_nvram_lock(bp);
3106
3107 /* Increment written */
3108 written += data_end - data_start;
3109 }
3110
3111 nvram_write_end:
3112 if (align_start || align_end)
3113 kfree(buf);
3114 return rc;
3115 }
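
/* Unbuffered flash can only be erased a page at a time, so the write
 * is a read-modify-write: the affected page is read into
 * flash_buffer, erased, and rewritten with the old bytes before
 * data_start, the new data, and the old bytes after data_end.
 * Throughout the loop 'i' is the byte offset into flash_buffer,
 * which is why all three write-back loops advance it by 4 per dword.
 */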
3116
3117 static int
3118 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3119 {
3120 u32 val;
3121 int i, rc = 0;
3122
3123 /* Wait for the current PCI transaction to complete before
3124 * issuing a reset. */
3125 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3126 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3127 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3128 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3129 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3130 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3131 udelay(5);
3132
3133 /* Wait for the firmware to tell us it is ok to issue a reset. */
3134 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3135
3136 /* Deposit a driver reset signature so the firmware knows that
3137 * this is a soft reset. */
3138 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3139 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3140
3141 /* Do a dummy read to force the chip to complete all current
3142 * transactions before we issue a reset. */
3143 val = REG_RD(bp, BNX2_MISC_ID);
3144
3145 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3146 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3147 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3148
3149 /* Chip reset. */
3150 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3151
3152 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3153 (CHIP_ID(bp) == CHIP_ID_5706_A1))
3154 msleep(15);
3155
3156 /* Reset takes approximately 30 usec */
3157 for (i = 0; i < 10; i++) {
3158 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3159 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3160 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
3161 break;
3162 }
3163 udelay(10);
3164 }
3165
3166 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3167 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3168 printk(KERN_ERR PFX "Chip reset did not complete\n");
3169 return -EBUSY;
3170 }
3171
3172 /* Make sure byte swapping is properly configured. */
3173 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3174 if (val != 0x01020304) {
3175 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3176 return -ENODEV;
3177 }
3178
3179 /* Wait for the firmware to finish its initialization. */
3180 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3181 if (rc)
3182 return rc;
3183
3184 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3185 /* Adjust the voltage regulator two steps lower. The default
3186 * value of this register is 0x0000000e. */
3187 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3188
3189 /* Remove bad rbuf memory from the free pool. */
3190 rc = bnx2_alloc_bad_rbuf(bp);
3191 }
3192
3193 return rc;
3194 }
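
/* BNX2_PCI_SWAP_DIAG0 is a fixed-pattern register: reading back
 * 0x01020304 confirms that the post-reset byte-swap settings give
 * the host a coherent view.  The driver-reset signature written to
 * shared memory beforehand lets the bootcode distinguish this soft
 * reset from a power-on reset.
 */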
3195
3196 static int
3197 bnx2_init_chip(struct bnx2 *bp)
3198 {
3199 u32 val;
3200 int rc;
3201
3202 /* Make sure the interrupt is not active. */
3203 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3204
3205 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3206 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3207 #ifdef __BIG_ENDIAN
3208 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
3209 #endif
3210 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
3211 DMA_READ_CHANS << 12 |
3212 DMA_WRITE_CHANS << 16;
3213
3214 val |= (0x2 << 20) | (1 << 11);
3215
3216 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
3217 val |= (1 << 23);
3218
3219 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3220 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3221 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3222
3223 REG_WR(bp, BNX2_DMA_CONFIG, val);
3224
3225 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3226 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3227 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3228 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3229 }
3230
3231 if (bp->flags & PCIX_FLAG) {
3232 u16 val16;
3233
3234 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3235 &val16);
3236 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3237 val16 & ~PCI_X_CMD_ERO);
3238 }
3239
3240 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3241 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3242 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3243 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3244
3245 /* Initialize context mapping and zero out the quick contexts. The
3246 * context block must have already been enabled. */
3247 bnx2_init_context(bp);
3248
3249 bnx2_init_cpus(bp);
3250 bnx2_init_nvram(bp);
3251
3252 bnx2_set_mac_addr(bp);
3253
3254 val = REG_RD(bp, BNX2_MQ_CONFIG);
3255 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3256 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3257 REG_WR(bp, BNX2_MQ_CONFIG, val);
3258
3259 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3260 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3261 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3262
3263 val = (BCM_PAGE_BITS - 8) << 24;
3264 REG_WR(bp, BNX2_RV2P_CONFIG, val);
3265
3266 /* Configure page size. */
3267 val = REG_RD(bp, BNX2_TBDR_CONFIG);
3268 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3269 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3270 REG_WR(bp, BNX2_TBDR_CONFIG, val);
3271
3272 val = bp->mac_addr[0] +
3273 (bp->mac_addr[1] << 8) +
3274 (bp->mac_addr[2] << 16) +
3275 bp->mac_addr[3] +
3276 (bp->mac_addr[4] << 8) +
3277 (bp->mac_addr[5] << 16);
3278 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
3279
3280 /* Program the MTU. Also include 4 bytes for CRC32. */
3281 val = bp->dev->mtu + ETH_HLEN + 4;
3282 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
3283 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
3284 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
3285
3286 bp->last_status_idx = 0;
3287 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
3288
3289 /* Set up how to generate a link change interrupt. */
3290 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
3291
3292 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
3293 (u64) bp->status_blk_mapping & 0xffffffff);
3294 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
3295
3296 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
3297 (u64) bp->stats_blk_mapping & 0xffffffff);
3298 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
3299 (u64) bp->stats_blk_mapping >> 32);
3300
3301 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
3302 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
3303
3304 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
3305 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
3306
3307 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
3308 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
3309
3310 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
3311
3312 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
3313
3314 REG_WR(bp, BNX2_HC_COM_TICKS,
3315 (bp->com_ticks_int << 16) | bp->com_ticks);
3316
3317 REG_WR(bp, BNX2_HC_CMD_TICKS,
3318 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
3319
3320 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
3321 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3322
3323 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
3324 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
3325 else {
3326 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
3327 BNX2_HC_CONFIG_TX_TMR_MODE |
3328 BNX2_HC_CONFIG_COLLECT_STATS);
3329 }
3330
3331 /* Clear internal stats counters. */
3332 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
3333
3334 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3335
3336 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
3337 BNX2_PORT_FEATURE_ASF_ENABLED)
3338 bp->flags |= ASF_ENABLE_FLAG;
3339
3340 /* Initialize the receive filter. */
3341 bnx2_set_rx_mode(bp->dev);
3342
3343 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
3344 0);
3345
3346 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
3347 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
3348
3349 udelay(20);
3350
3351 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
3352
3353 return rc;
3354 }
3355
3356
3357 static void
3358 bnx2_init_tx_ring(struct bnx2 *bp)
3359 {
3360 struct tx_bd *txbd;
3361 u32 val;
3362
3363 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
3364
3365 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3366 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3367
3368 bp->tx_prod = 0;
3369 bp->tx_cons = 0;
3370 bp->hw_tx_cons = 0;
3371 bp->tx_prod_bseq = 0;
3372
3373 val = BNX2_L2CTX_TYPE_TYPE_L2;
3374 val |= BNX2_L2CTX_TYPE_SIZE_L2;
3375 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TYPE, val);
3376
3377 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2;
3378 val |= 8 << 16;
3379 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_CMD_TYPE, val);
3380
3381 val = (u64) bp->tx_desc_mapping >> 32;
3382 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_HI, val);
3383
3384 val = (u64) bp->tx_desc_mapping & 0xffffffff;
3385 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_LO, val);
3386 }
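
/* The TX ring is a single page of buffer descriptors whose final BD
 * (MAX_TX_DESC_CNT) is not a data descriptor but a chain pointer
 * back to the start of the page, which is what makes the ring
 * circular.  The ring base is then handed to the chip through the
 * TX_CID context.
 */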
3387
3388 static void
3389 bnx2_init_rx_ring(struct bnx2 *bp)
3390 {
3391 struct rx_bd *rxbd;
3392 int i;
3393 u16 prod, ring_prod;
3394 u32 val;
3395
3396 /* 8 for CRC and VLAN */
3397 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
3398 /* 8 for alignment */
3399 bp->rx_buf_size = bp->rx_buf_use_size + 8;
3400
3401 ring_prod = prod = bp->rx_prod = 0;
3402 bp->rx_cons = 0;
3403 bp->hw_rx_cons = 0;
3404 bp->rx_prod_bseq = 0;
3405
3406 for (i = 0; i < bp->rx_max_ring; i++) {
3407 int j;
3408
3409 rxbd = &bp->rx_desc_ring[i][0];
3410 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
3411 rxbd->rx_bd_len = bp->rx_buf_use_size;
3412 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
3413 }
3414 if (i == (bp->rx_max_ring - 1))
3415 j = 0;
3416 else
3417 j = i + 1;
3418 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
3419 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
3420 0xffffffff;
3421 }
3422
3423 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3424 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
3425 val |= 0x02 << 8;
3426 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
3427
3428 val = (u64) bp->rx_desc_mapping[0] >> 32;
3429 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
3430
3431 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
3432 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
3433
3434 for (i = 0; i < bp->rx_ring_size; i++) {
3435 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
3436 break;
3437 }
3438 prod = NEXT_RX_BD(prod);
3439 ring_prod = RX_RING_IDX(prod);
3440 }
3441 bp->rx_prod = prod;
3442
3443 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
3444
3445 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
3446 }
3447
3448 static void
3449 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3450 {
3451 u32 num_rings, max;
3452
3453 bp->rx_ring_size = size;
3454 num_rings = 1;
3455 while (size > MAX_RX_DESC_CNT) {
3456 size -= MAX_RX_DESC_CNT;
3457 num_rings++;
3458 }
3459 /* round to next power of 2 */
3460 max = MAX_RX_RINGS;
3461 while ((max & num_rings) == 0)
3462 max >>= 1;
3463
3464 if (num_rings != max)
3465 max <<= 1;
3466
3467 bp->rx_max_ring = max;
3468 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3469 }
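
/* num_rings is rounded up to the next power of two by shifting max
 * down from MAX_RX_RINGS until it overlaps num_rings, then doubling
 * if the match was inexact.  For example, assuming the usual
 * MAX_RX_DESC_CNT of 255 (one page of 256 BDs minus the chain
 * entry), a requested size of 600 needs num_rings = 3, which rounds
 * up to rx_max_ring = 4.
 */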
3470
3471 static void
3472 bnx2_free_tx_skbs(struct bnx2 *bp)
3473 {
3474 int i;
3475
3476 if (bp->tx_buf_ring == NULL)
3477 return;
3478
3479 for (i = 0; i < TX_DESC_CNT; ) {
3480 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3481 struct sk_buff *skb = tx_buf->skb;
3482 int j, last;
3483
3484 if (skb == NULL) {
3485 i++;
3486 continue;
3487 }
3488
3489 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3490 skb_headlen(skb), PCI_DMA_TODEVICE);
3491
3492 tx_buf->skb = NULL;
3493
3494 last = skb_shinfo(skb)->nr_frags;
3495 for (j = 0; j < last; j++) {
3496 tx_buf = &bp->tx_buf_ring[i + j + 1];
3497 pci_unmap_page(bp->pdev,
3498 pci_unmap_addr(tx_buf, mapping),
3499 skb_shinfo(skb)->frags[j].size,
3500 PCI_DMA_TODEVICE);
3501 }
3502 dev_kfree_skb_any(skb);
3503 i += j + 1;
3504 }
3505
3506 }
3507
3508 static void
3509 bnx2_free_rx_skbs(struct bnx2 *bp)
3510 {
3511 int i;
3512
3513 if (bp->rx_buf_ring == NULL)
3514 return;
3515
3516 for (i = 0; i < bp->rx_max_ring_idx; i++) {
3517 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3518 struct sk_buff *skb = rx_buf->skb;
3519
3520 if (skb == NULL)
3521 continue;
3522
3523 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3524 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3525
3526 rx_buf->skb = NULL;
3527
3528 dev_kfree_skb_any(skb);
3529 }
3530 }
3531
3532 static void
3533 bnx2_free_skbs(struct bnx2 *bp)
3534 {
3535 bnx2_free_tx_skbs(bp);
3536 bnx2_free_rx_skbs(bp);
3537 }
3538
3539 static int
3540 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3541 {
3542 int rc;
3543
3544 rc = bnx2_reset_chip(bp, reset_code);
3545 bnx2_free_skbs(bp);
3546 if (rc)
3547 return rc;
3548
3549 bnx2_init_chip(bp);
3550 bnx2_init_tx_ring(bp);
3551 bnx2_init_rx_ring(bp);
3552 return 0;
3553 }
3554
3555 static int
3556 bnx2_init_nic(struct bnx2 *bp)
3557 {
3558 int rc;
3559
3560 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3561 return rc;
3562
3563 bnx2_init_phy(bp);
3564 bnx2_set_link(bp);
3565 return 0;
3566 }
3567
3568 static int
3569 bnx2_test_registers(struct bnx2 *bp)
3570 {
3571 int ret;
3572 int i;
3573 static const struct {
3574 u16 offset;
3575 u16 flags;
3576 u32 rw_mask;
3577 u32 ro_mask;
3578 } reg_tbl[] = {
3579 { 0x006c, 0, 0x00000000, 0x0000003f },
3580 { 0x0090, 0, 0xffffffff, 0x00000000 },
3581 { 0x0094, 0, 0x00000000, 0x00000000 },
3582
3583 { 0x0404, 0, 0x00003f00, 0x00000000 },
3584 { 0x0418, 0, 0x00000000, 0xffffffff },
3585 { 0x041c, 0, 0x00000000, 0xffffffff },
3586 { 0x0420, 0, 0x00000000, 0x80ffffff },
3587 { 0x0424, 0, 0x00000000, 0x00000000 },
3588 { 0x0428, 0, 0x00000000, 0x00000001 },
3589 { 0x0450, 0, 0x00000000, 0x0000ffff },
3590 { 0x0454, 0, 0x00000000, 0xffffffff },
3591 { 0x0458, 0, 0x00000000, 0xffffffff },
3592
3593 { 0x0808, 0, 0x00000000, 0xffffffff },
3594 { 0x0854, 0, 0x00000000, 0xffffffff },
3595 { 0x0868, 0, 0x00000000, 0x77777777 },
3596 { 0x086c, 0, 0x00000000, 0x77777777 },
3597 { 0x0870, 0, 0x00000000, 0x77777777 },
3598 { 0x0874, 0, 0x00000000, 0x77777777 },
3599
3600 { 0x0c00, 0, 0x00000000, 0x00000001 },
3601 { 0x0c04, 0, 0x00000000, 0x03ff0001 },
3602 { 0x0c08, 0, 0x0f0ff073, 0x00000000 },
3603
3604 { 0x1000, 0, 0x00000000, 0x00000001 },
3605 { 0x1004, 0, 0x00000000, 0x000f0001 },
3606
3607 { 0x1408, 0, 0x01c00800, 0x00000000 },
3608 { 0x149c, 0, 0x8000ffff, 0x00000000 },
3609 { 0x14a8, 0, 0x00000000, 0x000001ff },
3610 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
3611 { 0x14b0, 0, 0x00000002, 0x00000001 },
3612 { 0x14b8, 0, 0x00000000, 0x00000000 },
3613 { 0x14c0, 0, 0x00000000, 0x00000009 },
3614 { 0x14c4, 0, 0x00003fff, 0x00000000 },
3615 { 0x14cc, 0, 0x00000000, 0x00000001 },
3616 { 0x14d0, 0, 0xffffffff, 0x00000000 },
3617
3618 { 0x1800, 0, 0x00000000, 0x00000001 },
3619 { 0x1804, 0, 0x00000000, 0x00000003 },
3620
3621 { 0x2800, 0, 0x00000000, 0x00000001 },
3622 { 0x2804, 0, 0x00000000, 0x00003f01 },
3623 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
3624 { 0x2810, 0, 0xffff0000, 0x00000000 },
3625 { 0x2814, 0, 0xffff0000, 0x00000000 },
3626 { 0x2818, 0, 0xffff0000, 0x00000000 },
3627 { 0x281c, 0, 0xffff0000, 0x00000000 },
3628 { 0x2834, 0, 0xffffffff, 0x00000000 },
3629 { 0x2840, 0, 0x00000000, 0xffffffff },
3630 { 0x2844, 0, 0x00000000, 0xffffffff },
3631 { 0x2848, 0, 0xffffffff, 0x00000000 },
3632 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
3633
3634 { 0x2c00, 0, 0x00000000, 0x00000011 },
3635 { 0x2c04, 0, 0x00000000, 0x00030007 },
3636
3637 { 0x3c00, 0, 0x00000000, 0x00000001 },
3638 { 0x3c04, 0, 0x00000000, 0x00070000 },
3639 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
3640 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
3641 { 0x3c10, 0, 0xffffffff, 0x00000000 },
3642 { 0x3c14, 0, 0x00000000, 0xffffffff },
3643 { 0x3c18, 0, 0x00000000, 0xffffffff },
3644 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
3645 { 0x3c20, 0, 0xffffff00, 0x00000000 },
3646
3647 { 0x5004, 0, 0x00000000, 0x0000007f },
3648 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
3649 { 0x500c, 0, 0xf800f800, 0x07ff07ff },
3650
3651 { 0x5c00, 0, 0x00000000, 0x00000001 },
3652 { 0x5c04, 0, 0x00000000, 0x0003000f },
3653 { 0x5c08, 0, 0x00000003, 0x00000000 },
3654 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
3655 { 0x5c10, 0, 0x00000000, 0xffffffff },
3656 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
3657 { 0x5c84, 0, 0x00000000, 0x0000f333 },
3658 { 0x5c88, 0, 0x00000000, 0x00077373 },
3659 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
3660
3661 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
3662 { 0x680c, 0, 0xffffffff, 0x00000000 },
3663 { 0x6810, 0, 0xffffffff, 0x00000000 },
3664 { 0x6814, 0, 0xffffffff, 0x00000000 },
3665 { 0x6818, 0, 0xffffffff, 0x00000000 },
3666 { 0x681c, 0, 0xffffffff, 0x00000000 },
3667 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
3668 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
3669 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
3670 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
3671 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
3672 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
3673 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
3674 { 0x683c, 0, 0x0000ffff, 0x00000000 },
3675 { 0x6840, 0, 0x00000ff0, 0x00000000 },
3676 { 0x6844, 0, 0x00ffff00, 0x00000000 },
3677 { 0x684c, 0, 0xffffffff, 0x00000000 },
3678 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
3679 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
3680 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
3681 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
3682 { 0x6908, 0, 0x00000000, 0x0001ff0f },
3683 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
3684
3685 { 0xffff, 0, 0x00000000, 0x00000000 },
3686 };
3687
3688 ret = 0;
3689 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
3690 u32 offset, rw_mask, ro_mask, save_val, val;
3691
3692 offset = (u32) reg_tbl[i].offset;
3693 rw_mask = reg_tbl[i].rw_mask;
3694 ro_mask = reg_tbl[i].ro_mask;
3695
3696 save_val = readl(bp->regview + offset);
3697
3698 writel(0, bp->regview + offset);
3699
3700 val = readl(bp->regview + offset);
3701 if ((val & rw_mask) != 0) {
3702 goto reg_test_err;
3703 }
3704
3705 if ((val & ro_mask) != (save_val & ro_mask)) {
3706 goto reg_test_err;
3707 }
3708
3709 writel(0xffffffff, bp->regview + offset);
3710
3711 val = readl(bp->regview + offset);
3712 if ((val & rw_mask) != rw_mask) {
3713 goto reg_test_err;
3714 }
3715
3716 if ((val & ro_mask) != (save_val & ro_mask)) {
3717 goto reg_test_err;
3718 }
3719
3720 writel(save_val, bp->regview + offset);
3721 continue;
3722
3723 reg_test_err:
3724 writel(save_val, bp->regview + offset);
3725 ret = -ENODEV;
3726 break;
3727 }
3728 return ret;
3729 }
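
/* Each table entry probes one register: after writing 0 and then
 * 0xffffffff, the rw_mask bits must read back exactly as written
 * while the ro_mask bits must keep the value saved before the test.
 * Any deviation restores the saved value and fails the self-test
 * with -ENODEV.
 */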
3730
3731 static int
3732 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3733 {
3734 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
3735 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3736 int i;
3737
3738 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3739 u32 offset;
3740
3741 for (offset = 0; offset < size; offset += 4) {
3742
3743 REG_WR_IND(bp, start + offset, test_pattern[i]);
3744
3745 if (REG_RD_IND(bp, start + offset) !=
3746 test_pattern[i]) {
3747 return -ENODEV;
3748 }
3749 }
3750 }
3751 return 0;
3752 }
3753
3754 static int
3755 bnx2_test_memory(struct bnx2 *bp)
3756 {
3757 int ret = 0;
3758 int i;
3759 static const struct {
3760 u32 offset;
3761 u32 len;
3762 } mem_tbl[] = {
3763 { 0x60000, 0x4000 },
3764 { 0xa0000, 0x3000 },
3765 { 0xe0000, 0x4000 },
3766 { 0x120000, 0x4000 },
3767 { 0x1a0000, 0x4000 },
3768 { 0x160000, 0x4000 },
3769 { 0xffffffff, 0 },
3770 };
3771
3772 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3773 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3774 mem_tbl[i].len)) != 0) {
3775 return ret;
3776 }
3777 }
3778
3779 return ret;
3780 }
3781
3782 #define BNX2_MAC_LOOPBACK 0
3783 #define BNX2_PHY_LOOPBACK 1
3784
3785 static int
3786 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
3787 {
3788 unsigned int pkt_size, num_pkts, i;
3789 struct sk_buff *skb, *rx_skb;
3790 unsigned char *packet;
3791 u16 rx_start_idx, rx_idx;
3792 dma_addr_t map;
3793 struct tx_bd *txbd;
3794 struct sw_bd *rx_buf;
3795 struct l2_fhdr *rx_hdr;
3796 int ret = -ENODEV;
3797
3798 if (loopback_mode == BNX2_MAC_LOOPBACK) {
3799 bp->loopback = MAC_LOOPBACK;
3800 bnx2_set_mac_loopback(bp);
3801 }
3802 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
3803 bp->loopback = 0;
3804 bnx2_set_phy_loopback(bp);
3805 }
3806 else
3807 return -EINVAL;
3808
3809 pkt_size = 1514;
3810 skb = dev_alloc_skb(pkt_size);
3811 if (!skb)
3812 return -ENOMEM;
3813 packet = skb_put(skb, pkt_size);
3814 memcpy(packet, bp->mac_addr, 6);
3815 memset(packet + 6, 0x0, 8);
3816 for (i = 14; i < pkt_size; i++)
3817 packet[i] = (unsigned char) (i & 0xff);
3818
3819 map = pci_map_single(bp->pdev, skb->data, pkt_size,
3820 PCI_DMA_TODEVICE);
3821
3822 REG_WR(bp, BNX2_HC_COMMAND,
3823 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3824
3825 REG_RD(bp, BNX2_HC_COMMAND);
3826
3827 udelay(5);
3828 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
3829
3830 num_pkts = 0;
3831
3832 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
3833
3834 txbd->tx_bd_haddr_hi = (u64) map >> 32;
3835 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
3836 txbd->tx_bd_mss_nbytes = pkt_size;
3837 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
3838
3839 num_pkts++;
3840 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
3841 bp->tx_prod_bseq += pkt_size;
3842
3843 REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, bp->tx_prod);
3844 REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
3845
3846 udelay(100);
3847
3848 REG_WR(bp, BNX2_HC_COMMAND,
3849 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3850
3851 REG_RD(bp, BNX2_HC_COMMAND);
3852
3853 udelay(5);
3854
3855 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
3856 dev_kfree_skb_irq(skb);
3857
3858 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
3859 goto loopback_test_done;
3860 }
3861
3862 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
3863 if (rx_idx != rx_start_idx + num_pkts) {
3864 goto loopback_test_done;
3865 }
3866
3867 rx_buf = &bp->rx_buf_ring[rx_start_idx];
3868 rx_skb = rx_buf->skb;
3869
3870 rx_hdr = (struct l2_fhdr *) rx_skb->data;
3871 skb_reserve(rx_skb, bp->rx_offset);
3872
3873 pci_dma_sync_single_for_cpu(bp->pdev,
3874 pci_unmap_addr(rx_buf, mapping),
3875 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
3876
3877 if (rx_hdr->l2_fhdr_status &
3878 (L2_FHDR_ERRORS_BAD_CRC |
3879 L2_FHDR_ERRORS_PHY_DECODE |
3880 L2_FHDR_ERRORS_ALIGNMENT |
3881 L2_FHDR_ERRORS_TOO_SHORT |
3882 L2_FHDR_ERRORS_GIANT_FRAME)) {
3883
3884 goto loopback_test_done;
3885 }
3886
3887 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
3888 goto loopback_test_done;
3889 }
3890
3891 for (i = 14; i < pkt_size; i++) {
3892 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
3893 goto loopback_test_done;
3894 }
3895 }
3896
3897 ret = 0;
3898
3899 loopback_test_done:
3900 bp->loopback = 0;
3901 return ret;
3902 }
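
/* The loopback test queues one self-addressed 1514-byte frame and
 * uses COAL_NOW_WO_INT to force status block updates without raising
 * an interrupt.  It then checks that exactly one TX and one RX
 * completion arrived and compares the payload byte-for-byte; the
 * received l2_fhdr_pkt_len includes the 4-byte CRC, hence the "- 4".
 */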
3903
3904 #define BNX2_MAC_LOOPBACK_FAILED 1
3905 #define BNX2_PHY_LOOPBACK_FAILED 2
3906 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
3907 BNX2_PHY_LOOPBACK_FAILED)
3908
3909 static int
3910 bnx2_test_loopback(struct bnx2 *bp)
3911 {
3912 int rc = 0;
3913
3914 if (!netif_running(bp->dev))
3915 return BNX2_LOOPBACK_FAILED;
3916
3917 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
3918 spin_lock_bh(&bp->phy_lock);
3919 bnx2_init_phy(bp);
3920 spin_unlock_bh(&bp->phy_lock);
3921 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
3922 rc |= BNX2_MAC_LOOPBACK_FAILED;
3923 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
3924 rc |= BNX2_PHY_LOOPBACK_FAILED;
3925 return rc;
3926 }
3927
3928 #define NVRAM_SIZE 0x200
3929 #define CRC32_RESIDUAL 0xdebb20e3
3930
3931 static int
3932 bnx2_test_nvram(struct bnx2 *bp)
3933 {
3934 u32 buf[NVRAM_SIZE / 4];
3935 u8 *data = (u8 *) buf;
3936 int rc = 0;
3937 u32 magic, csum;
3938
3939 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
3940 goto test_nvram_done;
3941
3942 magic = be32_to_cpu(buf[0]);
3943 if (magic != 0x669955aa) {
3944 rc = -ENODEV;
3945 goto test_nvram_done;
3946 }
3947
3948 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
3949 goto test_nvram_done;
3950
3951 csum = ether_crc_le(0x100, data);
3952 if (csum != CRC32_RESIDUAL) {
3953 rc = -ENODEV;
3954 goto test_nvram_done;
3955 }
3956
3957 csum = ether_crc_le(0x100, data + 0x100);
3958 if (csum != CRC32_RESIDUAL) {
3959 rc = -ENODEV;
3960 }
3961
3962 test_nvram_done:
3963 return rc;
3964 }
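
/* Each 0x100-byte block checked above carries its own CRC32 in its
 * last bytes, so running ether_crc_le() over the whole block (data
 * plus appended CRC) must yield the constant CRC-32 residual
 * 0xdebb20e3 whenever the data is intact, regardless of the block
 * contents.
 */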
3965
3966 static int
3967 bnx2_test_link(struct bnx2 *bp)
3968 {
3969 u32 bmsr;
3970
3971 spin_lock_bh(&bp->phy_lock);
3972 bnx2_read_phy(bp, MII_BMSR, &bmsr);
3973 bnx2_read_phy(bp, MII_BMSR, &bmsr);
3974 spin_unlock_bh(&bp->phy_lock);
3975
3976 if (bmsr & BMSR_LSTATUS) {
3977 return 0;
3978 }
3979 return -ENODEV;
3980 }
3981
3982 static int
3983 bnx2_test_intr(struct bnx2 *bp)
3984 {
3985 int i;
3986 u16 status_idx;
3987
3988 if (!netif_running(bp->dev))
3989 return -ENODEV;
3990
3991 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
3992
3993 /* This register is not touched during run-time. */
3994 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
3995 REG_RD(bp, BNX2_HC_COMMAND);
3996
3997 for (i = 0; i < 10; i++) {
3998 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
3999 status_idx) {
4000
4001 break;
4002 }
4003
4004 msleep_interruptible(10);
4005 }
4006 if (i < 10)
4007 return 0;
4008
4009 return -ENODEV;
4010 }
4011
4012 static void
4013 bnx2_timer(unsigned long data)
4014 {
4015 struct bnx2 *bp = (struct bnx2 *) data;
4016 u32 msg;
4017
4018 if (!netif_running(bp->dev))
4019 return;
4020
4021 if (atomic_read(&bp->intr_sem) != 0)
4022 goto bnx2_restart_timer;
4023
4024 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
4025 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
4026
4027 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
4028 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
4029
4030 spin_lock(&bp->phy_lock);
4031 if (bp->serdes_an_pending) {
4032 bp->serdes_an_pending--;
4033 }
4034 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4035 u32 bmcr;
4036
4037 bp->current_interval = bp->timer_interval;
4038
4039 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4040
4041 if (bmcr & BMCR_ANENABLE) {
4042 u32 phy1, phy2;
4043
4044 bnx2_write_phy(bp, 0x1c, 0x7c00);
4045 bnx2_read_phy(bp, 0x1c, &phy1);
4046
4047 bnx2_write_phy(bp, 0x17, 0x0f01);
4048 bnx2_read_phy(bp, 0x15, &phy2);
4049 bnx2_write_phy(bp, 0x17, 0x0f01);
4050 bnx2_read_phy(bp, 0x15, &phy2);
4051
4052 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4053 !(phy2 & 0x20)) { /* no CONFIG */
4054
4055 bmcr &= ~BMCR_ANENABLE;
4056 bmcr |= BMCR_SPEED1000 |
4057 BMCR_FULLDPLX;
4058 bnx2_write_phy(bp, MII_BMCR, bmcr);
4059 bp->phy_flags |=
4060 PHY_PARALLEL_DETECT_FLAG;
4061 }
4062 }
4063 }
4064 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4065 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4066 u32 phy2;
4067
4068 bnx2_write_phy(bp, 0x17, 0x0f01);
4069 bnx2_read_phy(bp, 0x15, &phy2);
4070 if (phy2 & 0x20) {
4071 u32 bmcr;
4072
4073 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4074 bmcr |= BMCR_ANENABLE;
4075 bnx2_write_phy(bp, MII_BMCR, bmcr);
4076
4077 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4078
4079 }
4080 }
4081 else
4082 bp->current_interval = bp->timer_interval;
4083
4084 spin_unlock(&bp->phy_lock);
4085 }
4086
4087 bnx2_restart_timer:
4088 mod_timer(&bp->timer, jiffies + bp->current_interval);
4089 }
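
/* The SerDes code in the timer implements parallel detection by
 * hand, evidently to cope with forced-speed link partners that never
 * answer autoneg on the 5706.  Registers 0x1c/0x17/0x15 appear to be
 * vendor shadow registers exposing signal-detect and remote-config
 * status: signal with no config word means a forced partner, so
 * autoneg is dropped in favor of forced 1000/full; once a config
 * word reappears, autoneg is turned back on.
 */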
4090
4091 /* Called with rtnl_lock */
4092 static int
4093 bnx2_open(struct net_device *dev)
4094 {
4095 struct bnx2 *bp = netdev_priv(dev);
4096 int rc;
4097
4098 bnx2_set_power_state(bp, PCI_D0);
4099 bnx2_disable_int(bp);
4100
4101 rc = bnx2_alloc_mem(bp);
4102 if (rc)
4103 return rc;
4104
4105 if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
4106 (CHIP_ID(bp) != CHIP_ID_5706_A1) &&
4107 !disable_msi) {
4108
4109 if (pci_enable_msi(bp->pdev) == 0) {
4110 bp->flags |= USING_MSI_FLAG;
4111 rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
4112 dev);
4113 }
4114 else {
4115 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4116 SA_SHIRQ, dev->name, dev);
4117 }
4118 }
4119 else {
4120 rc = request_irq(bp->pdev->irq, bnx2_interrupt, SA_SHIRQ,
4121 dev->name, dev);
4122 }
4123 if (rc) {
4124 bnx2_free_mem(bp);
4125 return rc;
4126 }
4127
4128 rc = bnx2_init_nic(bp);
4129
4130 if (rc) {
4131 free_irq(bp->pdev->irq, dev);
4132 if (bp->flags & USING_MSI_FLAG) {
4133 pci_disable_msi(bp->pdev);
4134 bp->flags &= ~USING_MSI_FLAG;
4135 }
4136 bnx2_free_skbs(bp);
4137 bnx2_free_mem(bp);
4138 return rc;
4139 }
4140
4141 mod_timer(&bp->timer, jiffies + bp->current_interval);
4142
4143 atomic_set(&bp->intr_sem, 0);
4144
4145 bnx2_enable_int(bp);
4146
4147 if (bp->flags & USING_MSI_FLAG) {
4148 /* Test MSI to make sure it is working.
4149 * If the MSI test fails, go back to INTx mode.
4150 */
4151 if (bnx2_test_intr(bp) != 0) {
4152 printk(KERN_WARNING PFX "%s: No interrupt was generated"
4153 " using MSI, switching to INTx mode. Please"
4154 " report this failure to the PCI maintainer"
4155 " and include system chipset information.\n",
4156 bp->dev->name);
4157
4158 bnx2_disable_int(bp);
4159 free_irq(bp->pdev->irq, dev);
4160 pci_disable_msi(bp->pdev);
4161 bp->flags &= ~USING_MSI_FLAG;
4162
4163 rc = bnx2_init_nic(bp);
4164
4165 if (!rc) {
4166 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4167 SA_SHIRQ, dev->name, dev);
4168 }
4169 if (rc) {
4170 bnx2_free_skbs(bp);
4171 bnx2_free_mem(bp);
4172 del_timer_sync(&bp->timer);
4173 return rc;
4174 }
4175 bnx2_enable_int(bp);
4176 }
4177 }
4178 if (bp->flags & USING_MSI_FLAG) {
4179 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
4180 }
4181
4182 netif_start_queue(dev);
4183
4184 return 0;
4185 }
4186
4187 static void
4188 bnx2_reset_task(void *data)
4189 {
4190 struct bnx2 *bp = data;
4191
4192 if (!netif_running(bp->dev))
4193 return;
4194
4195 bp->in_reset_task = 1;
4196 bnx2_netif_stop(bp);
4197
4198 bnx2_init_nic(bp);
4199
4200 atomic_set(&bp->intr_sem, 1);
4201 bnx2_netif_start(bp);
4202 bp->in_reset_task = 0;
4203 }
4204
4205 static void
4206 bnx2_tx_timeout(struct net_device *dev)
4207 {
4208 struct bnx2 *bp = netdev_priv(dev);
4209
4210 /* This allows the netif to be shut down gracefully before resetting */
4211 schedule_work(&bp->reset_task);
4212 }
4213
4214 #ifdef BCM_VLAN
4215 /* Called with rtnl_lock */
4216 static void
4217 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4218 {
4219 struct bnx2 *bp = netdev_priv(dev);
4220
4221 bnx2_netif_stop(bp);
4222
4223 bp->vlgrp = vlgrp;
4224 bnx2_set_rx_mode(dev);
4225
4226 bnx2_netif_start(bp);
4227 }
4228
4229 /* Called with rtnl_lock */
4230 static void
4231 bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4232 {
4233 struct bnx2 *bp = netdev_priv(dev);
4234
4235 bnx2_netif_stop(bp);
4236
4237 if (bp->vlgrp)
4238 bp->vlgrp->vlan_devices[vid] = NULL;
4239 bnx2_set_rx_mode(dev);
4240
4241 bnx2_netif_start(bp);
4242 }
4243 #endif
4244
4245 /* Called with dev->xmit_lock.
4246 * hard_start_xmit is pseudo-lockless - a lock is only required when
4247 * the tx queue is full. This way, we get the benefit of lockless
4248 * operations most of the time without the complexity of handling
4249 * netif_stop_queue/wake_queue race conditions.
4250 */
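/* The race is closed at the tail of this function: if the ring looks
 * full after queueing, tx_lock is taken and bnx2_tx_avail() is
 * re-checked before the queue is left stopped, so a completion that
 * lands in between cannot strand a stopped queue.
 */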
4251 static int
4252 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4253 {
4254 struct bnx2 *bp = netdev_priv(dev);
4255 dma_addr_t mapping;
4256 struct tx_bd *txbd;
4257 struct sw_bd *tx_buf;
4258 u32 len, vlan_tag_flags, last_frag, mss;
4259 u16 prod, ring_prod;
4260 int i;
4261
4262 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
4263 netif_stop_queue(dev);
4264 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4265 dev->name);
4266
4267 return NETDEV_TX_BUSY;
4268 }
4269 len = skb_headlen(skb);
4270 prod = bp->tx_prod;
4271 ring_prod = TX_RING_IDX(prod);
4272
4273 vlan_tag_flags = 0;
4274 if (skb->ip_summed == CHECKSUM_HW) {
4275 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4276 }
4277
4278 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4279 vlan_tag_flags |=
4280 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4281 }
4282 #ifdef BCM_TSO
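	/* For LSO, prime the headers so the chip can finish them for each
	 * segment it emits: clear the IP checksum, set tot_len to the
	 * per-segment length, and seed the TCP checksum with the
	 * pseudo-header sum (computed with a zero length).
	 */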
4283 if ((mss = skb_shinfo(skb)->tso_size) &&
4284 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4285 u32 tcp_opt_len, ip_tcp_len;
4286
4287 if (skb_header_cloned(skb) &&
4288 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4289 dev_kfree_skb(skb);
4290 return NETDEV_TX_OK;
4291 }
4292
4294 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4295
4296 tcp_opt_len = 0;
4297 if (skb->h.th->doff > 5) {
4298 tcp_opt_len = (skb->h.th->doff - 5) << 2;
4299 }
4300 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4301
4302 skb->nh.iph->check = 0;
4303 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4304 skb->h.th->check =
4305 ~csum_tcpudp_magic(skb->nh.iph->saddr,
4306 skb->nh.iph->daddr,
4307 0, IPPROTO_TCP, 0);
4308
4309 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4310 vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4311 (tcp_opt_len >> 2)) << 8;
4312 }
4313 }
4314 else
4315 #endif
4316 {
4317 mss = 0;
4318 }
4319
4320 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4321
4322 tx_buf = &bp->tx_buf_ring[ring_prod];
4323 tx_buf->skb = skb;
4324 pci_unmap_addr_set(tx_buf, mapping, mapping);
4325
4326 txbd = &bp->tx_desc_ring[ring_prod];
4327
4328 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4329 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4330 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4331 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4332
4333 last_frag = skb_shinfo(skb)->nr_frags;
4334
4335 for (i = 0; i < last_frag; i++) {
4336 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4337
4338 prod = NEXT_TX_BD(prod);
4339 ring_prod = TX_RING_IDX(prod);
4340 txbd = &bp->tx_desc_ring[ring_prod];
4341
4342 len = frag->size;
4343 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4344 len, PCI_DMA_TODEVICE);
4345 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4346 mapping, mapping);
4347
4348 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4349 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4350 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4351 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4352
4353 }
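	/* The first BD was marked TX_BD_FLAGS_START above; mark the BD of
	 * the last fragment with TX_BD_FLAGS_END to close the chain.
	 */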
4354 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4355
4356 prod = NEXT_TX_BD(prod);
4357 bp->tx_prod_bseq += skb->len;
4358
4359 REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod);
4360 REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
4361
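	/* mmiowb() orders the mailbox writes above against MMIO issued by
	 * other CPUs, on platforms where such writes can be reordered.
	 */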
4362 mmiowb();
4363
4364 bp->tx_prod = prod;
4365 dev->trans_start = jiffies;
4366
4367 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
4368 spin_lock(&bp->tx_lock);
4369 netif_stop_queue(dev);
4370
4371 if (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)
4372 netif_wake_queue(dev);
4373 spin_unlock(&bp->tx_lock);
4374 }
4375
4376 return NETDEV_TX_OK;
4377 }
4378
4379 /* Called with rtnl_lock */
4380 static int
4381 bnx2_close(struct net_device *dev)
4382 {
4383 struct bnx2 *bp = netdev_priv(dev);
4384 u32 reset_code;
4385
4386 /* Calling flush_scheduled_work() may deadlock because
4387 * linkwatch_event() may be on the workqueue and it will try to get
4388 * the rtnl_lock, which we are holding.
4389 */
4390 while (bp->in_reset_task)
4391 msleep(1);
4392
4393 bnx2_netif_stop(bp);
4394 del_timer_sync(&bp->timer);
4395 if (bp->flags & NO_WOL_FLAG)
4396 reset_code = BNX2_DRV_MSG_CODE_UNLOAD;
4397 else if (bp->wol)
4398 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4399 else
4400 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4401 bnx2_reset_chip(bp, reset_code);
4402 free_irq(bp->pdev->irq, dev);
4403 if (bp->flags & USING_MSI_FLAG) {
4404 pci_disable_msi(bp->pdev);
4405 bp->flags &= ~USING_MSI_FLAG;
4406 }
4407 bnx2_free_skbs(bp);
4408 bnx2_free_mem(bp);
4409 bp->link_up = 0;
4410 netif_carrier_off(bp->dev);
4411 bnx2_set_power_state(bp, PCI_D3hot);
4412 return 0;
4413 }
4414
4415 #define GET_NET_STATS64(ctr) \
4416 ((unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
4417 (unsigned long) (ctr##_lo))
4418
4419 #define GET_NET_STATS32(ctr) \
4420 (ctr##_lo)
4421
4422 #if (BITS_PER_LONG == 64)
4423 #define GET_NET_STATS GET_NET_STATS64
4424 #else
4425 #define GET_NET_STATS GET_NET_STATS32
4426 #endif
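/* Illustrative expansion: on a 64-bit kernel,
 *	GET_NET_STATS(stats_blk->stat_IfHCInOctets)
 * yields ((unsigned long) stat_IfHCInOctets_hi << 32) +
 * stat_IfHCInOctets_lo, i.e. the full 64-bit count; a 32-bit kernel
 * keeps only the _lo word, since unsigned long cannot hold 64 bits.
 */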
4427
4428 static struct net_device_stats *
4429 bnx2_get_stats(struct net_device *dev)
4430 {
4431 struct bnx2 *bp = netdev_priv(dev);
4432 struct statistics_block *stats_blk = bp->stats_blk;
4433 struct net_device_stats *net_stats = &bp->net_stats;
4434
4435 if (bp->stats_blk == NULL) {
4436 return net_stats;
4437 }
4438 net_stats->rx_packets =
4439 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
4440 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
4441 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
4442
4443 net_stats->tx_packets =
4444 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
4445 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
4446 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
4447
4448 net_stats->rx_bytes =
4449 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
4450
4451 net_stats->tx_bytes =
4452 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
4453
4454 net_stats->multicast =
4455 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
4456
4457 net_stats->collisions =
4458 (unsigned long) stats_blk->stat_EtherStatsCollisions;
4459
4460 net_stats->rx_length_errors =
4461 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
4462 stats_blk->stat_EtherStatsOverrsizePkts);
4463
4464 net_stats->rx_over_errors =
4465 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
4466
4467 net_stats->rx_frame_errors =
4468 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
4469
4470 net_stats->rx_crc_errors =
4471 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
4472
4473 net_stats->rx_errors = net_stats->rx_length_errors +
4474 net_stats->rx_over_errors + net_stats->rx_frame_errors +
4475 net_stats->rx_crc_errors;
4476
4477 net_stats->tx_aborted_errors =
4478 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
4479 stats_blk->stat_Dot3StatsLateCollisions);
4480
4481 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
4482 (CHIP_ID(bp) == CHIP_ID_5708_A0))
4483 net_stats->tx_carrier_errors = 0;
4484 else {
4485 net_stats->tx_carrier_errors =
4486 (unsigned long)
4487 stats_blk->stat_Dot3StatsCarrierSenseErrors;
4488 }
4489
4490 net_stats->tx_errors =
4491 (unsigned long)
4492 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
4493 +
4494 net_stats->tx_aborted_errors +
4495 net_stats->tx_carrier_errors;
4496
4497 return net_stats;
4498 }
4499
4500 /* All ethtool functions called with rtnl_lock */
4501
4502 static int
4503 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4504 {
4505 struct bnx2 *bp = netdev_priv(dev);
4506
4507 cmd->supported = SUPPORTED_Autoneg;
4508 if (bp->phy_flags & PHY_SERDES_FLAG) {
4509 cmd->supported |= SUPPORTED_1000baseT_Full |
4510 SUPPORTED_FIBRE;
4511
4512 cmd->port = PORT_FIBRE;
4513 }
4514 else {
4515 cmd->supported |= SUPPORTED_10baseT_Half |
4516 SUPPORTED_10baseT_Full |
4517 SUPPORTED_100baseT_Half |
4518 SUPPORTED_100baseT_Full |
4519 SUPPORTED_1000baseT_Full |
4520 SUPPORTED_TP;
4521
4522 cmd->port = PORT_TP;
4523 }
4524
4525 cmd->advertising = bp->advertising;
4526
4527 if (bp->autoneg & AUTONEG_SPEED) {
4528 cmd->autoneg = AUTONEG_ENABLE;
4529 }
4530 else {
4531 cmd->autoneg = AUTONEG_DISABLE;
4532 }
4533
4534 if (netif_carrier_ok(dev)) {
4535 cmd->speed = bp->line_speed;
4536 cmd->duplex = bp->duplex;
4537 }
4538 else {
4539 cmd->speed = -1;
4540 cmd->duplex = -1;
4541 }
4542
4543 cmd->transceiver = XCVR_INTERNAL;
4544 cmd->phy_address = bp->phy_addr;
4545
4546 return 0;
4547 }
4548
4549 static int
4550 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4551 {
4552 struct bnx2 *bp = netdev_priv(dev);
4553 u8 autoneg = bp->autoneg;
4554 u8 req_duplex = bp->req_duplex;
4555 u16 req_line_speed = bp->req_line_speed;
4556 u32 advertising = bp->advertising;
4557
4558 if (cmd->autoneg == AUTONEG_ENABLE) {
4559 autoneg |= AUTONEG_SPEED;
4560
4561 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
4562
4563 /* allow advertising a single speed */
4564 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
4565 (cmd->advertising == ADVERTISED_10baseT_Full) ||
4566 (cmd->advertising == ADVERTISED_100baseT_Half) ||
4567 (cmd->advertising == ADVERTISED_100baseT_Full)) {
4568
4569 if (bp->phy_flags & PHY_SERDES_FLAG)
4570 return -EINVAL;
4571
4572 advertising = cmd->advertising;
4573
4574 }
4575 else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
4576 advertising = cmd->advertising;
4577 }
4578 else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
4579 return -EINVAL;
4580 }
4581 else {
4582 if (bp->phy_flags & PHY_SERDES_FLAG) {
4583 advertising = ETHTOOL_ALL_FIBRE_SPEED;
4584 }
4585 else {
4586 advertising = ETHTOOL_ALL_COPPER_SPEED;
4587 }
4588 }
4589 advertising |= ADVERTISED_Autoneg;
4590 }
4591 else {
4592 if (bp->phy_flags & PHY_SERDES_FLAG) {
4593 if ((cmd->speed != SPEED_1000) ||
4594 (cmd->duplex != DUPLEX_FULL)) {
4595 return -EINVAL;
4596 }
4597 }
4598 else if (cmd->speed == SPEED_1000) {
4599 return -EINVAL;
4600 }
4601 autoneg &= ~AUTONEG_SPEED;
4602 req_line_speed = cmd->speed;
4603 req_duplex = cmd->duplex;
4604 advertising = 0;
4605 }
4606
4607 bp->autoneg = autoneg;
4608 bp->advertising = advertising;
4609 bp->req_line_speed = req_line_speed;
4610 bp->req_duplex = req_duplex;
4611
4612 spin_lock_bh(&bp->phy_lock);
4613
4614 bnx2_setup_phy(bp);
4615
4616 spin_unlock_bh(&bp->phy_lock);
4617
4618 return 0;
4619 }
4620
4621 static void
4622 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4623 {
4624 struct bnx2 *bp = netdev_priv(dev);
4625
4626 strcpy(info->driver, DRV_MODULE_NAME);
4627 strcpy(info->version, DRV_MODULE_VERSION);
4628 strcpy(info->bus_info, pci_name(bp->pdev));
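	/* bp->fw_ver packs the bootcode revision one byte per field;
	 * render it as a "major.minor.fix" string of single digits.
	 */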
4629 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4630 info->fw_version[1] = '.';
4631 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4632 info->fw_version[3] = '.';
4633 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
4634 info->fw_version[5] = 0;
4634 }
4635
4636 #define BNX2_REGDUMP_LEN (32 * 1024)
4637
4638 static int
4639 bnx2_get_regs_len(struct net_device *dev)
4640 {
4641 return BNX2_REGDUMP_LEN;
4642 }
4643
4644 static void
4645 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4646 {
4647 u32 *p = _p, i, offset;
4648 u8 *orig_p = _p;
4649 struct bnx2 *bp = netdev_priv(dev);
4650 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4651 0x0800, 0x0880, 0x0c00, 0x0c10,
4652 0x0c30, 0x0d08, 0x1000, 0x101c,
4653 0x1040, 0x1048, 0x1080, 0x10a4,
4654 0x1400, 0x1490, 0x1498, 0x14f0,
4655 0x1500, 0x155c, 0x1580, 0x15dc,
4656 0x1600, 0x1658, 0x1680, 0x16d8,
4657 0x1800, 0x1820, 0x1840, 0x1854,
4658 0x1880, 0x1894, 0x1900, 0x1984,
4659 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4660 0x1c80, 0x1c94, 0x1d00, 0x1d84,
4661 0x2000, 0x2030, 0x23c0, 0x2400,
4662 0x2800, 0x2820, 0x2830, 0x2850,
4663 0x2b40, 0x2c10, 0x2fc0, 0x3058,
4664 0x3c00, 0x3c94, 0x4000, 0x4010,
4665 0x4080, 0x4090, 0x43c0, 0x4458,
4666 0x4c00, 0x4c18, 0x4c40, 0x4c54,
4667 0x4fc0, 0x5010, 0x53c0, 0x5444,
4668 0x5c00, 0x5c18, 0x5c80, 0x5c90,
4669 0x5fc0, 0x6000, 0x6400, 0x6428,
4670 0x6800, 0x6848, 0x684c, 0x6860,
4671 0x6888, 0x6910, 0x8000 };
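	/* Consecutive pairs in reg_boundaries delimit register ranges that
	 * are safe to read; addresses falling in the gaps are left
	 * zero-filled in the dump (see the memset below).
	 */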
4672
4673 regs->version = 0;
4674
4675 memset(p, 0, BNX2_REGDUMP_LEN);
4676
4677 if (!netif_running(bp->dev))
4678 return;
4679
4680 i = 0;
4681 offset = reg_boundaries[0];
4682 p += offset;
4683 while (offset < BNX2_REGDUMP_LEN) {
4684 *p++ = REG_RD(bp, offset);
4685 offset += 4;
4686 if (offset == reg_boundaries[i + 1]) {
4687 offset = reg_boundaries[i + 2];
4688 p = (u32 *) (orig_p + offset);
4689 i += 2;
4690 }
4691 }
4692 }
4693
4694 static void
4695 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4696 {
4697 struct bnx2 *bp = netdev_priv(dev);
4698
4699 if (bp->flags & NO_WOL_FLAG) {
4700 wol->supported = 0;
4701 wol->wolopts = 0;
4702 }
4703 else {
4704 wol->supported = WAKE_MAGIC;
4705 if (bp->wol)
4706 wol->wolopts = WAKE_MAGIC;
4707 else
4708 wol->wolopts = 0;
4709 }
4710 memset(&wol->sopass, 0, sizeof(wol->sopass));
4711 }
4712
4713 static int
4714 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4715 {
4716 struct bnx2 *bp = netdev_priv(dev);
4717
4718 if (wol->wolopts & ~WAKE_MAGIC)
4719 return -EINVAL;
4720
4721 if (wol->wolopts & WAKE_MAGIC) {
4722 if (bp->flags & NO_WOL_FLAG)
4723 return -EINVAL;
4724
4725 bp->wol = 1;
4726 }
4727 else {
4728 bp->wol = 0;
4729 }
4730 return 0;
4731 }
4732
4733 static int
4734 bnx2_nway_reset(struct net_device *dev)
4735 {
4736 struct bnx2 *bp = netdev_priv(dev);
4737 u32 bmcr;
4738
4739 if (!(bp->autoneg & AUTONEG_SPEED)) {
4740 return -EINVAL;
4741 }
4742
4743 spin_lock_bh(&bp->phy_lock);
4744
4745 /* Force a link-down event that is visible to the link partner */
4746 if (bp->phy_flags & PHY_SERDES_FLAG) {
4747 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
4748 spin_unlock_bh(&bp->phy_lock);
4749
4750 msleep(20);
4751
4752 spin_lock_bh(&bp->phy_lock);
4753 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
4754 bp->current_interval = SERDES_AN_TIMEOUT;
4755 bp->serdes_an_pending = 1;
4756 mod_timer(&bp->timer, jiffies + bp->current_interval);
4757 }
4758 }
4759
4760 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4761 bmcr &= ~BMCR_LOOPBACK;
4762 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
4763
4764 spin_unlock_bh(&bp->phy_lock);
4765
4766 return 0;
4767 }
4768
4769 static int
4770 bnx2_get_eeprom_len(struct net_device *dev)
4771 {
4772 struct bnx2 *bp = netdev_priv(dev);
4773
4774 if (bp->flash_info == NULL)
4775 return 0;
4776
4777 return (int) bp->flash_size;
4778 }
4779
4780 static int
4781 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4782 u8 *eebuf)
4783 {
4784 struct bnx2 *bp = netdev_priv(dev);
4785 int rc;
4786
4787 /* parameters already validated in ethtool_get_eeprom */
4788
4789 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
4790
4791 return rc;
4792 }
4793
4794 static int
4795 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4796 u8 *eebuf)
4797 {
4798 struct bnx2 *bp = netdev_priv(dev);
4799 int rc;
4800
4801 /* parameters already validated in ethtool_set_eeprom */
4802
4803 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
4804
4805 return rc;
4806 }
4807
4808 static int
4809 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4810 {
4811 struct bnx2 *bp = netdev_priv(dev);
4812
4813 memset(coal, 0, sizeof(struct ethtool_coalesce));
4814
4815 coal->rx_coalesce_usecs = bp->rx_ticks;
4816 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
4817 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
4818 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
4819
4820 coal->tx_coalesce_usecs = bp->tx_ticks;
4821 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
4822 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
4823 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
4824
4825 coal->stats_block_coalesce_usecs = bp->stats_ticks;
4826
4827 return 0;
4828 }
4829
4830 static int
4831 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4832 {
4833 struct bnx2 *bp = netdev_priv(dev);
4834
4835 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
4836 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
4837
4838 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
4839 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
4840
4841 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
4842 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
4843
4844 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
4845 if (bp->rx_quick_cons_trip_int > 0xff)
4846 bp->rx_quick_cons_trip_int = 0xff;
4847
4848 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
4849 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
4850
4851 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
4852 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
4853
4854 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
4855 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
4856
4857 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
4858 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
4859 0xff;
4860
4861 bp->stats_ticks = coal->stats_block_coalesce_usecs;
4862 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
4863 bp->stats_ticks &= 0xffff00;
4864
4865 if (netif_running(bp->dev)) {
4866 bnx2_netif_stop(bp);
4867 bnx2_init_nic(bp);
4868 bnx2_netif_start(bp);
4869 }
4870
4871 return 0;
4872 }
4873
4874 static void
4875 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
4876 {
4877 struct bnx2 *bp = netdev_priv(dev);
4878
4879 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
4880 ering->rx_mini_max_pending = 0;
4881 ering->rx_jumbo_max_pending = 0;
4882
4883 ering->rx_pending = bp->rx_ring_size;
4884 ering->rx_mini_pending = 0;
4885 ering->rx_jumbo_pending = 0;
4886
4887 ering->tx_max_pending = MAX_TX_DESC_CNT;
4888 ering->tx_pending = bp->tx_ring_size;
4889 }
4890
4891 static int
4892 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
4893 {
4894 struct bnx2 *bp = netdev_priv(dev);
4895
4896 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
4897 (ering->tx_pending > MAX_TX_DESC_CNT) ||
4898 (ering->tx_pending <= MAX_SKB_FRAGS)) {
4899
4900 return -EINVAL;
4901 }
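	/* Ring sizes can only change with the chip quiesced: reset the
	 * hardware and free every buffer before applying the new sizes,
	 * then bring the NIC back up below.
	 */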
4902 if (netif_running(bp->dev)) {
4903 bnx2_netif_stop(bp);
4904 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
4905 bnx2_free_skbs(bp);
4906 bnx2_free_mem(bp);
4907 }
4908
4909 bnx2_set_rx_ring_size(bp, ering->rx_pending);
4910 bp->tx_ring_size = ering->tx_pending;
4911
4912 if (netif_running(bp->dev)) {
4913 int rc;
4914
4915 rc = bnx2_alloc_mem(bp);
4916 if (rc)
4917 return rc;
4918 bnx2_init_nic(bp);
4919 bnx2_netif_start(bp);
4920 }
4921
4922 return 0;
4923 }
4924
4925 static void
4926 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
4927 {
4928 struct bnx2 *bp = netdev_priv(dev);
4929
4930 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
4931 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
4932 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
4933 }
4934
4935 static int
4936 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
4937 {
4938 struct bnx2 *bp = netdev_priv(dev);
4939
4940 bp->req_flow_ctrl = 0;
4941 if (epause->rx_pause)
4942 bp->req_flow_ctrl |= FLOW_CTRL_RX;
4943 if (epause->tx_pause)
4944 bp->req_flow_ctrl |= FLOW_CTRL_TX;
4945
4946 if (epause->autoneg) {
4947 bp->autoneg |= AUTONEG_FLOW_CTRL;
4948 }
4949 else {
4950 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
4951 }
4952
4953 spin_lock_bh(&bp->phy_lock);
4954
4955 bnx2_setup_phy(bp);
4956
4957 spin_unlock_bh(&bp->phy_lock);
4958
4959 return 0;
4960 }
4961
4962 static u32
4963 bnx2_get_rx_csum(struct net_device *dev)
4964 {
4965 struct bnx2 *bp = netdev_priv(dev);
4966
4967 return bp->rx_csum;
4968 }
4969
4970 static int
4971 bnx2_set_rx_csum(struct net_device *dev, u32 data)
4972 {
4973 struct bnx2 *bp = netdev_priv(dev);
4974
4975 bp->rx_csum = data;
4976 return 0;
4977 }
4978
4979 #define BNX2_NUM_STATS 45
4980
4981 static struct {
4982 char string[ETH_GSTRING_LEN];
4983 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
4984 { "rx_bytes" },
4985 { "rx_error_bytes" },
4986 { "tx_bytes" },
4987 { "tx_error_bytes" },
4988 { "rx_ucast_packets" },
4989 { "rx_mcast_packets" },
4990 { "rx_bcast_packets" },
4991 { "tx_ucast_packets" },
4992 { "tx_mcast_packets" },
4993 { "tx_bcast_packets" },
4994 { "tx_mac_errors" },
4995 { "tx_carrier_errors" },
4996 { "rx_crc_errors" },
4997 { "rx_align_errors" },
4998 { "tx_single_collisions" },
4999 { "tx_multi_collisions" },
5000 { "tx_deferred" },
5001 { "tx_excess_collisions" },
5002 { "tx_late_collisions" },
5003 { "tx_total_collisions" },
5004 { "rx_fragments" },
5005 { "rx_jabbers" },
5006 { "rx_undersize_packets" },
5007 { "rx_oversize_packets" },
5008 { "rx_64_byte_packets" },
5009 { "rx_65_to_127_byte_packets" },
5010 { "rx_128_to_255_byte_packets" },
5011 { "rx_256_to_511_byte_packets" },
5012 { "rx_512_to_1023_byte_packets" },
5013 { "rx_1024_to_1522_byte_packets" },
5014 { "rx_1523_to_9022_byte_packets" },
5015 { "tx_64_byte_packets" },
5016 { "tx_65_to_127_byte_packets" },
5017 { "tx_128_to_255_byte_packets" },
5018 { "tx_256_to_511_byte_packets" },
5019 { "tx_512_to_1023_byte_packets" },
5020 { "tx_1024_to_1522_byte_packets" },
5021 { "tx_1523_to_9022_byte_packets" },
5022 { "rx_xon_frames" },
5023 { "rx_xoff_frames" },
5024 { "tx_xon_frames" },
5025 { "tx_xoff_frames" },
5026 { "rx_mac_ctrl_frames" },
5027 { "rx_filtered_packets" },
5028 { "rx_discards" },
5029 };
5030
5031 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
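/* STATS_OFFSET32() converts a byte offset within struct statistics_block
 * into an index into the u32 view used by bnx2_get_ethtool_stats(), e.g.
 * STATS_OFFSET32(stat_IfHCInOctets_hi) indexes the high word of the
 * 64-bit octet counter.
 */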
5032
5033 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
5034 STATS_OFFSET32(stat_IfHCInOctets_hi),
5035 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
5036 STATS_OFFSET32(stat_IfHCOutOctets_hi),
5037 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
5038 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
5039 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
5040 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
5041 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5042 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5043 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
5044 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
5045 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
5046 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
5047 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
5048 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
5049 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
5050 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
5051 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
5052 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
5053 STATS_OFFSET32(stat_EtherStatsCollisions),
5054 STATS_OFFSET32(stat_EtherStatsFragments),
5055 STATS_OFFSET32(stat_EtherStatsJabbers),
5056 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
5057 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
5058 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
5059 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
5060 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
5061 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
5062 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
5063 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
5064 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
5065 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
5066 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
5067 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
5068 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
5069 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
5070 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
5071 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
5072 STATS_OFFSET32(stat_XonPauseFramesReceived),
5073 STATS_OFFSET32(stat_XoffPauseFramesReceived),
5074 STATS_OFFSET32(stat_OutXonSent),
5075 STATS_OFFSET32(stat_OutXoffSent),
5076 STATS_OFFSET32(stat_MacControlFramesReceived),
5077 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
5078 STATS_OFFSET32(stat_IfInMBUFDiscards),
5079 };
5080
5081 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
5082 * skipped because of errata.
5083 */
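/* Each entry below is the width in bytes of the counter named at the
 * same index in bnx2_stats_str_arr: 8 = 64-bit hi/lo pair, 4 = 32-bit,
 * 0 = counter skipped on that chip.
 */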
5084 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
5085 8,0,8,8,8,8,8,8,8,8,
5086 4,0,4,4,4,4,4,4,4,4,
5087 4,4,4,4,4,4,4,4,4,4,
5088 4,4,4,4,4,4,4,4,4,4,
5089 4,4,4,4,4,
5090 };
5091
5092 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
5093 8,0,8,8,8,8,8,8,8,8,
5094 4,4,4,4,4,4,4,4,4,4,
5095 4,4,4,4,4,4,4,4,4,4,
5096 4,4,4,4,4,4,4,4,4,4,
5097 4,4,4,4,4,
5098 };
5099
5100 #define BNX2_NUM_TESTS 6
5101
5102 static struct {
5103 char string[ETH_GSTRING_LEN];
5104 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
5105 { "register_test (offline)" },
5106 { "memory_test (offline)" },
5107 { "loopback_test (offline)" },
5108 { "nvram_test (online)" },
5109 { "interrupt_test (online)" },
5110 { "link_test (online)" },
5111 };
5112
5113 static int
5114 bnx2_self_test_count(struct net_device *dev)
5115 {
5116 return BNX2_NUM_TESTS;
5117 }
5118
5119 static void
5120 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
5121 {
5122 struct bnx2 *bp = netdev_priv(dev);
5123
5124 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
5125 if (etest->flags & ETH_TEST_FL_OFFLINE) {
5126 bnx2_netif_stop(bp);
5127 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
5128 bnx2_free_skbs(bp);
5129
5130 if (bnx2_test_registers(bp) != 0) {
5131 buf[0] = 1;
5132 etest->flags |= ETH_TEST_FL_FAILED;
5133 }
5134 if (bnx2_test_memory(bp) != 0) {
5135 buf[1] = 1;
5136 etest->flags |= ETH_TEST_FL_FAILED;
5137 }
5138 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
5139 etest->flags |= ETH_TEST_FL_FAILED;
5140
5141 if (!netif_running(bp->dev)) {
5142 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5143 }
5144 else {
5145 bnx2_init_nic(bp);
5146 bnx2_netif_start(bp);
5147 }
5148
5149 /* wait for link up */
5150 msleep_interruptible(3000);
5151 if ((!bp->link_up) && !(bp->phy_flags & PHY_SERDES_FLAG))
5152 msleep_interruptible(4000);
5153 }
5154
5155 if (bnx2_test_nvram(bp) != 0) {
5156 buf[3] = 1;
5157 etest->flags |= ETH_TEST_FL_FAILED;
5158 }
5159 if (bnx2_test_intr(bp) != 0) {
5160 buf[4] = 1;
5161 etest->flags |= ETH_TEST_FL_FAILED;
5162 }
5163
5164 if (bnx2_test_link(bp) != 0) {
5165 buf[5] = 1;
5166 etest->flags |= ETH_TEST_FL_FAILED;
5167
5168 }
5169 }
5170
5171 static void
5172 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5173 {
5174 switch (stringset) {
5175 case ETH_SS_STATS:
5176 memcpy(buf, bnx2_stats_str_arr,
5177 sizeof(bnx2_stats_str_arr));
5178 break;
5179 case ETH_SS_TEST:
5180 memcpy(buf, bnx2_tests_str_arr,
5181 sizeof(bnx2_tests_str_arr));
5182 break;
5183 }
5184 }
5185
5186 static int
5187 bnx2_get_stats_count(struct net_device *dev)
5188 {
5189 return BNX2_NUM_STATS;
5190 }
5191
5192 static void
5193 bnx2_get_ethtool_stats(struct net_device *dev,
5194 struct ethtool_stats *stats, u64 *buf)
5195 {
5196 struct bnx2 *bp = netdev_priv(dev);
5197 int i;
5198 u32 *hw_stats = (u32 *) bp->stats_blk;
5199 u8 *stats_len_arr = NULL;
5200
5201 if (hw_stats == NULL) {
5202 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
5203 return;
5204 }
5205
5206 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
5207 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
5208 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
5209 (CHIP_ID(bp) == CHIP_ID_5708_A0))
5210 stats_len_arr = bnx2_5706_stats_len_arr;
5211 else
5212 stats_len_arr = bnx2_5708_stats_len_arr;
5213
5214 for (i = 0; i < BNX2_NUM_STATS; i++) {
5215 if (stats_len_arr[i] == 0) {
5216 /* skip this counter */
5217 buf[i] = 0;
5218 continue;
5219 }
5220 if (stats_len_arr[i] == 4) {
5221 /* 4-byte counter */
5222 buf[i] = (u64)
5223 *(hw_stats + bnx2_stats_offset_arr[i]);
5224 continue;
5225 }
5226 /* 8-byte counter */
5227 buf[i] = (((u64) *(hw_stats +
5228 bnx2_stats_offset_arr[i])) << 32) +
5229 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
5230 }
5231 }
5232
5233 static int
5234 bnx2_phys_id(struct net_device *dev, u32 data)
5235 {
5236 struct bnx2 *bp = netdev_priv(dev);
5237 int i;
5238 u32 save;
5239
5240 if (data == 0)
5241 data = 2;
5242
5243 save = REG_RD(bp, BNX2_MISC_CFG);
5244 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
5245
5246 for (i = 0; i < (data * 2); i++) {
5247 if ((i % 2) == 0) {
5248 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
5249 }
5250 else {
5251 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
5252 BNX2_EMAC_LED_1000MB_OVERRIDE |
5253 BNX2_EMAC_LED_100MB_OVERRIDE |
5254 BNX2_EMAC_LED_10MB_OVERRIDE |
5255 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
5256 BNX2_EMAC_LED_TRAFFIC);
5257 }
5258 msleep_interruptible(500);
5259 if (signal_pending(current))
5260 break;
5261 }
5262 REG_WR(bp, BNX2_EMAC_LED, 0);
5263 REG_WR(bp, BNX2_MISC_CFG, save);
5264 return 0;
5265 }
5266
5267 static struct ethtool_ops bnx2_ethtool_ops = {
5268 .get_settings = bnx2_get_settings,
5269 .set_settings = bnx2_set_settings,
5270 .get_drvinfo = bnx2_get_drvinfo,
5271 .get_regs_len = bnx2_get_regs_len,
5272 .get_regs = bnx2_get_regs,
5273 .get_wol = bnx2_get_wol,
5274 .set_wol = bnx2_set_wol,
5275 .nway_reset = bnx2_nway_reset,
5276 .get_link = ethtool_op_get_link,
5277 .get_eeprom_len = bnx2_get_eeprom_len,
5278 .get_eeprom = bnx2_get_eeprom,
5279 .set_eeprom = bnx2_set_eeprom,
5280 .get_coalesce = bnx2_get_coalesce,
5281 .set_coalesce = bnx2_set_coalesce,
5282 .get_ringparam = bnx2_get_ringparam,
5283 .set_ringparam = bnx2_set_ringparam,
5284 .get_pauseparam = bnx2_get_pauseparam,
5285 .set_pauseparam = bnx2_set_pauseparam,
5286 .get_rx_csum = bnx2_get_rx_csum,
5287 .set_rx_csum = bnx2_set_rx_csum,
5288 .get_tx_csum = ethtool_op_get_tx_csum,
5289 .set_tx_csum = ethtool_op_set_tx_csum,
5290 .get_sg = ethtool_op_get_sg,
5291 .set_sg = ethtool_op_set_sg,
5292 #ifdef BCM_TSO
5293 .get_tso = ethtool_op_get_tso,
5294 .set_tso = ethtool_op_set_tso,
5295 #endif
5296 .self_test_count = bnx2_self_test_count,
5297 .self_test = bnx2_self_test,
5298 .get_strings = bnx2_get_strings,
5299 .phys_id = bnx2_phys_id,
5300 .get_stats_count = bnx2_get_stats_count,
5301 .get_ethtool_stats = bnx2_get_ethtool_stats,
5302 .get_perm_addr = ethtool_op_get_perm_addr,
5303 };
5304
5305 /* Called with rtnl_lock */
5306 static int
5307 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5308 {
5309 struct mii_ioctl_data *data = if_mii(ifr);
5310 struct bnx2 *bp = netdev_priv(dev);
5311 int err;
5312
5313 switch(cmd) {
5314 case SIOCGMIIPHY:
5315 data->phy_id = bp->phy_addr;
5316
5317 /* fallthru */
5318 case SIOCGMIIREG: {
5319 u32 mii_regval;
5320
5321 spin_lock_bh(&bp->phy_lock);
5322 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
5323 spin_unlock_bh(&bp->phy_lock);
5324
5325 data->val_out = mii_regval;
5326
5327 return err;
5328 }
5329
5330 case SIOCSMIIREG:
5331 if (!capable(CAP_NET_ADMIN))
5332 return -EPERM;
5333
5334 spin_lock_bh(&bp->phy_lock);
5335 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
5336 spin_unlock_bh(&bp->phy_lock);
5337
5338 return err;
5339
5340 default:
5341 /* do nothing */
5342 break;
5343 }
5344 return -EOPNOTSUPP;
5345 }
5346
5347 /* Called with rtnl_lock */
5348 static int
5349 bnx2_change_mac_addr(struct net_device *dev, void *p)
5350 {
5351 struct sockaddr *addr = p;
5352 struct bnx2 *bp = netdev_priv(dev);
5353
5354 if (!is_valid_ether_addr(addr->sa_data))
5355 return -EINVAL;
5356
5357 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5358 if (netif_running(dev))
5359 bnx2_set_mac_addr(bp);
5360
5361 return 0;
5362 }
5363
5364 /* Called with rtnl_lock */
5365 static int
5366 bnx2_change_mtu(struct net_device *dev, int new_mtu)
5367 {
5368 struct bnx2 *bp = netdev_priv(dev);
5369
5370 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5371 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5372 return -EINVAL;
5373
5374 dev->mtu = new_mtu;
5375 if (netif_running(dev)) {
5376 bnx2_netif_stop(bp);
5377
5378 bnx2_init_nic(bp);
5379
5380 bnx2_netif_start(bp);
5381 }
5382 return 0;
5383 }
5384
5385 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
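/* Netpoll entry point: service the NIC with its IRQ disabled so that
 * netconsole and similar facilities can make progress while normal
 * interrupt handling is unavailable.
 */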
5386 static void
5387 poll_bnx2(struct net_device *dev)
5388 {
5389 struct bnx2 *bp = netdev_priv(dev);
5390
5391 disable_irq(bp->pdev->irq);
5392 bnx2_interrupt(bp->pdev->irq, dev, NULL);
5393 enable_irq(bp->pdev->irq);
5394 }
5395 #endif
5396
5397 static int __devinit
5398 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5399 {
5400 struct bnx2 *bp;
5401 unsigned long mem_len;
5402 int rc;
5403 u32 reg;
5404
5405 SET_MODULE_OWNER(dev);
5406 SET_NETDEV_DEV(dev, &pdev->dev);
5407 bp = netdev_priv(dev);
5408
5409 bp->flags = 0;
5410 bp->phy_flags = 0;
5411
5412 /* enable device (incl. PCI PM wakeup), and bus-mastering */
5413 rc = pci_enable_device(pdev);
5414 if (rc) {
5415 printk(KERN_ERR PFX "Cannot enable PCI device, aborting.");
5416 goto err_out;
5417 }
5418
5419 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5420 printk(KERN_ERR PFX "Cannot find PCI device base address, "
5421 "aborting.\n");
5422 rc = -ENODEV;
5423 goto err_out_disable;
5424 }
5425
5426 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5427 if (rc) {
5428 printk(KERN_ERR PFX "Cannot obtain PCI resources, aborting.\n");
5429 goto err_out_disable;
5430 }
5431
5432 pci_set_master(pdev);
5433
5434 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
5435 if (bp->pm_cap == 0) {
5436 printk(KERN_ERR PFX "Cannot find power management capability, "
5437 "aborting.\n");
5438 rc = -EIO;
5439 goto err_out_release;
5440 }
5441
5442 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
5443 if (bp->pcix_cap == 0) {
5444 printk(KERN_ERR PFX "Cannot find PCIX capability, aborting.\n");
5445 rc = -EIO;
5446 goto err_out_release;
5447 }
5448
5449 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
5450 bp->flags |= USING_DAC_FLAG;
5451 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
5452 printk(KERN_ERR PFX "pci_set_consistent_dma_mask "
5453 "failed, aborting.\n");
5454 rc = -EIO;
5455 goto err_out_release;
5456 }
5457 }
5458 else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
5459 printk(KERN_ERR PFX "System does not support DMA, aborting.\n");
5460 rc = -EIO;
5461 goto err_out_release;
5462 }
5463
5464 bp->dev = dev;
5465 bp->pdev = pdev;
5466
5467 spin_lock_init(&bp->phy_lock);
5468 spin_lock_init(&bp->tx_lock);
5469 INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
5470
5471 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
5472 mem_len = MB_GET_CID_ADDR(17);
5473 dev->mem_end = dev->mem_start + mem_len;
5474 dev->irq = pdev->irq;
5475
5476 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
5477
5478 if (!bp->regview) {
5479 printk(KERN_ERR PFX "Cannot map register space, aborting.\n");
5480 rc = -ENOMEM;
5481 goto err_out_release;
5482 }
5483
5484 /* Configure byte swap and enable write to the reg_window registers.
5485 * Rely on the CPU to do target byte swapping on big-endian systems;
5486 * the chip's target access swapping will not swap all accesses.
5487 */
5488 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
5489 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5490 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5491
5492 bnx2_set_power_state(bp, PCI_D0);
5493
5494 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
5495
5496 /* Get bus information. */
5497 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
5498 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
5499 u32 clkreg;
5500
5501 bp->flags |= PCIX_FLAG;
5502
5503 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
5504
5505 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
5506 switch (clkreg) {
5507 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
5508 bp->bus_speed_mhz = 133;
5509 break;
5510
5511 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
5512 bp->bus_speed_mhz = 100;
5513 break;
5514
5515 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
5516 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
5517 bp->bus_speed_mhz = 66;
5518 break;
5519
5520 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
5521 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
5522 bp->bus_speed_mhz = 50;
5523 break;
5524
5525 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
5526 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
5527 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
5528 bp->bus_speed_mhz = 33;
5529 break;
5530 }
5531 }
5532 else {
5533 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
5534 bp->bus_speed_mhz = 66;
5535 else
5536 bp->bus_speed_mhz = 33;
5537 }
5538
5539 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
5540 bp->flags |= PCI_32BIT_FLAG;
5541
5542 /* 5706A0 may falsely detect SERR and PERR. */
5543 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5544 reg = REG_RD(bp, PCI_COMMAND);
5545 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
5546 REG_WR(bp, PCI_COMMAND, reg);
5547 }
5548 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
5549 !(bp->flags & PCIX_FLAG)) {
5550
5551 printk(KERN_ERR PFX "5706 A1 can only be used in a PCIX bus, "
5552 "aborting.\n");
5553 rc = -EPERM;
5554 goto err_out_unmap;
5554 }
5555
5556 bnx2_init_nvram(bp);
5557
5558 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
5559
5560 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
5561 BNX2_SHM_HDR_SIGNATURE_SIG)
5562 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
5563 else
5564 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
5565
5566 /* Get the permanent MAC address. First we need to make sure the
5567 * firmware is actually running.
5568 */
5569 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
5570
5571 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
5572 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
5573 printk(KERN_ERR PFX "Firmware not running, aborting.\n");
5574 rc = -ENODEV;
5575 goto err_out_unmap;
5576 }
5577
5578 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
5579
5580 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
5581 bp->mac_addr[0] = (u8) (reg >> 8);
5582 bp->mac_addr[1] = (u8) reg;
5583
5584 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
5585 bp->mac_addr[2] = (u8) (reg >> 24);
5586 bp->mac_addr[3] = (u8) (reg >> 16);
5587 bp->mac_addr[4] = (u8) (reg >> 8);
5588 bp->mac_addr[5] = (u8) reg;
5589
5590 bp->tx_ring_size = MAX_TX_DESC_CNT;
5591 bnx2_set_rx_ring_size(bp, 100);
5592
5593 bp->rx_csum = 1;
5594
5595 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
5596
5597 bp->tx_quick_cons_trip_int = 20;
5598 bp->tx_quick_cons_trip = 20;
5599 bp->tx_ticks_int = 80;
5600 bp->tx_ticks = 80;
5601
5602 bp->rx_quick_cons_trip_int = 6;
5603 bp->rx_quick_cons_trip = 6;
5604 bp->rx_ticks_int = 18;
5605 bp->rx_ticks = 18;
5606
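	/* ~1 second statistics update by default; the low byte is masked
	 * off, matching the clamp applied in bnx2_set_coalesce().
	 */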
5607 bp->stats_ticks = 1000000 & 0xffff00;
5608
5609 bp->timer_interval = HZ;
5610 bp->current_interval = HZ;
5611
5612 bp->phy_addr = 1;
5613
5614 /* Disable WOL support if we are running on a SERDES chip. */
5615 if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
5616 bp->phy_flags |= PHY_SERDES_FLAG;
5617 bp->flags |= NO_WOL_FLAG;
5618 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5619 bp->phy_addr = 2;
5620 reg = REG_RD_IND(bp, bp->shmem_base +
5621 BNX2_SHARED_HW_CFG_CONFIG);
5622 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
5623 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
5624 }
5625 }
5626
5627 if (CHIP_NUM(bp) == CHIP_NUM_5708)
5628 bp->flags |= NO_WOL_FLAG;
5629
5630 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5631 bp->tx_quick_cons_trip_int =
5632 bp->tx_quick_cons_trip;
5633 bp->tx_ticks_int = bp->tx_ticks;
5634 bp->rx_quick_cons_trip_int =
5635 bp->rx_quick_cons_trip;
5636 bp->rx_ticks_int = bp->rx_ticks;
5637 bp->comp_prod_trip_int = bp->comp_prod_trip;
5638 bp->com_ticks_int = bp->com_ticks;
5639 bp->cmd_ticks_int = bp->cmd_ticks;
5640 }
5641
5642 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
5643 bp->req_line_speed = 0;
5644 if (bp->phy_flags & PHY_SERDES_FLAG) {
5645 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
5646
5647 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
5648 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
5649 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
5650 bp->autoneg = 0;
5651 bp->req_line_speed = bp->line_speed = SPEED_1000;
5652 bp->req_duplex = DUPLEX_FULL;
5653 }
5654 }
5655 else {
5656 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
5657 }
5658
5659 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
5660
5661 init_timer(&bp->timer);
5662 bp->timer.expires = RUN_AT(bp->timer_interval);
5663 bp->timer.data = (unsigned long) bp;
5664 bp->timer.function = bnx2_timer;
5665
5666 return 0;
5667
5668 err_out_unmap:
5669 if (bp->regview) {
5670 iounmap(bp->regview);
5671 bp->regview = NULL;
5672 }
5673
5674 err_out_release:
5675 pci_release_regions(pdev);
5676
5677 err_out_disable:
5678 pci_disable_device(pdev);
5679 pci_set_drvdata(pdev, NULL);
5680
5681 err_out:
5682 return rc;
5683 }
5684
5685 static int __devinit
5686 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5687 {
5688 static int version_printed = 0;
5689 struct net_device *dev = NULL;
5690 struct bnx2 *bp;
5691 int rc, i;
5692
5693 if (version_printed++ == 0)
5694 printk(KERN_INFO "%s", version);
5695
5696 /* dev zeroed in alloc_etherdev */
5697 dev = alloc_etherdev(sizeof(*bp));
5698
5699 if (!dev)
5700 return -ENOMEM;
5701
5702 rc = bnx2_init_board(pdev, dev);
5703 if (rc < 0) {
5704 free_netdev(dev);
5705 return rc;
5706 }
5707
5708 dev->open = bnx2_open;
5709 dev->hard_start_xmit = bnx2_start_xmit;
5710 dev->stop = bnx2_close;
5711 dev->get_stats = bnx2_get_stats;
5712 dev->set_multicast_list = bnx2_set_rx_mode;
5713 dev->do_ioctl = bnx2_ioctl;
5714 dev->set_mac_address = bnx2_change_mac_addr;
5715 dev->change_mtu = bnx2_change_mtu;
5716 dev->tx_timeout = bnx2_tx_timeout;
5717 dev->watchdog_timeo = TX_TIMEOUT;
5718 #ifdef BCM_VLAN
5719 dev->vlan_rx_register = bnx2_vlan_rx_register;
5720 dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
5721 #endif
5722 dev->poll = bnx2_poll;
5723 dev->ethtool_ops = &bnx2_ethtool_ops;
5724 dev->weight = 64;
5725
5726 bp = netdev_priv(dev);
5727
5728 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5729 dev->poll_controller = poll_bnx2;
5730 #endif
5731
5732 if ((rc = register_netdev(dev))) {
5733 printk(KERN_ERR PFX "Cannot register net device\n");
5734 if (bp->regview)
5735 iounmap(bp->regview);
5736 pci_release_regions(pdev);
5737 pci_disable_device(pdev);
5738 pci_set_drvdata(pdev, NULL);
5739 free_netdev(dev);
5740 return rc;
5741 }
5742
5743 pci_set_drvdata(pdev, dev);
5744
5745 memcpy(dev->dev_addr, bp->mac_addr, 6);
5746 memcpy(dev->perm_addr, bp->mac_addr, 6);
5747 bp->name = board_info[ent->driver_data].name;
5748 printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
5749 "IRQ %d, ",
5750 dev->name,
5751 bp->name,
5752 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
5753 ((CHIP_ID(bp) & 0x0ff0) >> 4),
5754 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
5755 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
5756 bp->bus_speed_mhz,
5757 dev->base_addr,
5758 bp->pdev->irq);
5759
5760 printk("node addr ");
5761 for (i = 0; i < 6; i++)
5762 printk("%2.2x", dev->dev_addr[i]);
5763 printk("\n");
5764
5765 dev->features |= NETIF_F_SG;
5766 if (bp->flags & USING_DAC_FLAG)
5767 dev->features |= NETIF_F_HIGHDMA;
5768 dev->features |= NETIF_F_IP_CSUM;
5769 #ifdef BCM_VLAN
5770 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5771 #endif
5772 #ifdef BCM_TSO
5773 dev->features |= NETIF_F_TSO;
5774 #endif
5775
5776 netif_carrier_off(bp->dev);
5777
5778 return 0;
5779 }
5780
5781 static void __devexit
5782 bnx2_remove_one(struct pci_dev *pdev)
5783 {
5784 struct net_device *dev = pci_get_drvdata(pdev);
5785 struct bnx2 *bp = netdev_priv(dev);
5786
5787 flush_scheduled_work();
5788
5789 unregister_netdev(dev);
5790
5791 if (bp->regview)
5792 iounmap(bp->regview);
5793
5794 free_netdev(dev);
5795 pci_release_regions(pdev);
5796 pci_disable_device(pdev);
5797 pci_set_drvdata(pdev, NULL);
5798 }
5799
5800 static int
5801 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
5802 {
5803 struct net_device *dev = pci_get_drvdata(pdev);
5804 struct bnx2 *bp = netdev_priv(dev);
5805 u32 reset_code;
5806
5807 if (!netif_running(dev))
5808 return 0;
5809
5810 flush_scheduled_work();
5811 bnx2_netif_stop(bp);
5812 netif_device_detach(dev);
5813 del_timer_sync(&bp->timer);
5814 if (bp->flags & NO_WOL_FLAG)
5815 reset_code = BNX2_DRV_MSG_CODE_UNLOAD;
5816 else if (bp->wol)
5817 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5818 else
5819 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5820 bnx2_reset_chip(bp, reset_code);
5821 bnx2_free_skbs(bp);
5822 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
5823 return 0;
5824 }
5825
5826 static int
5827 bnx2_resume(struct pci_dev *pdev)
5828 {
5829 struct net_device *dev = pci_get_drvdata(pdev);
5830 struct bnx2 *bp = netdev_priv(dev);
5831
5832 if (!netif_running(dev))
5833 return 0;
5834
5835 bnx2_set_power_state(bp, PCI_D0);
5836 netif_device_attach(dev);
5837 bnx2_init_nic(bp);
5838 bnx2_netif_start(bp);
5839 return 0;
5840 }
5841
5842 static struct pci_driver bnx2_pci_driver = {
5843 .name = DRV_MODULE_NAME,
5844 .id_table = bnx2_pci_tbl,
5845 .probe = bnx2_init_one,
5846 .remove = __devexit_p(bnx2_remove_one),
5847 .suspend = bnx2_suspend,
5848 .resume = bnx2_resume,
5849 };
5850
5851 static int __init bnx2_init(void)
5852 {
5853 return pci_module_init(&bnx2_pci_driver);
5854 }
5855
5856 static void __exit bnx2_cleanup(void)
5857 {
5858 pci_unregister_driver(&bnx2_pci_driver);
5859 }
5860
5861 module_init(bnx2_init);
5862 module_exit(bnx2_cleanup);
5863