1 /* bnx2.c: Broadcom NX2 network driver.
2 *
3 * Copyright (c) 2004-2007 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
54
55 #define FW_BUF_SIZE 0x10000
56
57 #define DRV_MODULE_NAME "bnx2"
58 #define PFX DRV_MODULE_NAME ": "
59 #define DRV_MODULE_VERSION "1.7.0"
60 #define DRV_MODULE_RELDATE "December 11, 2007"
61
62 #define RUN_AT(x) (jiffies + (x))
63
64 /* Time in jiffies before concluding the transmitter is hung. */
65 #define TX_TIMEOUT (5*HZ)
66
67 static const char version[] __devinitdata =
68 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
69
70 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
71 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
72 MODULE_LICENSE("GPL");
73 MODULE_VERSION(DRV_MODULE_VERSION);
74
75 static int disable_msi = 0;
76
77 module_param(disable_msi, int, 0);
78 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
80 typedef enum {
81 BCM5706 = 0,
82 NC370T,
83 NC370I,
84 BCM5706S,
85 NC370F,
86 BCM5708,
87 BCM5708S,
88 BCM5709,
89 BCM5709S,
90 } board_t;
91
92 /* indexed by board_t, above */
93 static const struct {
94 char *name;
95 } board_info[] __devinitdata = {
96 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
97 { "HP NC370T Multifunction Gigabit Server Adapter" },
98 { "HP NC370i Multifunction Gigabit Server Adapter" },
99 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
100 { "HP NC370F Multifunction Gigabit Server Adapter" },
101 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
102 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
103 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
104 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
105 };
106
107 static struct pci_device_id bnx2_pci_tbl[] = {
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
111 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
113 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
115 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
117 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
124 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
125 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
126 { 0, }
127 };
128
129 static struct flash_spec flash_table[] =
130 {
131 #define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
132 #define NONBUFFERED_FLAGS (BNX2_NV_WREN)
133 /* Slow EEPROM */
134 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
135 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
136 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
137 "EEPROM - slow"},
138 /* Expansion entry 0001 */
139 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
140 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
141 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
142 "Entry 0001"},
143 /* Saifun SA25F010 (non-buffered flash) */
144 /* strap, cfg1, & write1 need updates */
145 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
146 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
147 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
148 "Non-buffered flash (128kB)"},
149 /* Saifun SA25F020 (non-buffered flash) */
150 /* strap, cfg1, & write1 need updates */
151 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
152 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
153 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
154 "Non-buffered flash (256kB)"},
155 /* Expansion entry 0100 */
156 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
157 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
158 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
159 "Entry 0100"},
160 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
161 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
162 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
163 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
164 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
165 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
166 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
167 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
168 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
169 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
170 /* Saifun SA25F005 (non-buffered flash) */
171 /* strap, cfg1, & write1 need updates */
172 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
173 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
174 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
175 "Non-buffered flash (64kB)"},
176 /* Fast EEPROM */
177 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
178 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
179 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
180 "EEPROM - fast"},
181 /* Expansion entry 1001 */
182 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
183 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
184 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
185 "Entry 1001"},
186 /* Expansion entry 1010 */
187 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
188 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
189 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
190 "Entry 1010"},
191 /* ATMEL AT45DB011B (buffered flash) */
192 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
193 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
194 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
195 "Buffered flash (128kB)"},
196 /* Expansion entry 1100 */
197 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
198 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
199 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
200 "Entry 1100"},
201 /* Expansion entry 1101 */
202 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
203 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
204 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
205 "Entry 1101"},
206 /* Atmel Expansion entry 1110 */
207 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
208 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
209 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
210 "Entry 1110 (Atmel)"},
211 /* ATMEL AT45DB021B (buffered flash) */
212 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
213 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
214 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
215 "Buffered flash (256kB)"},
216 };
217
218 static struct flash_spec flash_5709 = {
219 .flags = BNX2_NV_BUFFERED,
220 .page_bits = BCM5709_FLASH_PAGE_BITS,
221 .page_size = BCM5709_FLASH_PAGE_SIZE,
222 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
223 .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
224 .name = "5709 Buffered flash (256kB)",
225 };
226
227 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
228
229 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_napi *bnapi)
230 {
231 u32 diff;
232
233 smp_mb();
234
235 /* The ring uses 256 indices for 255 entries, one of which
236 * must be skipped.
237 */
238 diff = bp->tx_prod - bnapi->tx_cons;
239 if (unlikely(diff >= TX_DESC_CNT)) {
240 diff &= 0xffff;
241 if (diff == TX_DESC_CNT)
242 diff = MAX_TX_DESC_CNT;
243 }
244 return (bp->tx_ring_size - diff);
245 }
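
/*
 * Editor's sketch (not driver code): the index arithmetic above in
 * miniature.  bnx2.h of this era defines TX_DESC_CNT as the BDs per
 * page (256) and MAX_TX_DESC_CNT as TX_DESC_CNT - 1; the producer and
 * consumer indices are free-running 16-bit counters, hence the masking.
 */
static inline u32 example_tx_avail(u16 prod, u16 cons, u32 ring_size)
{
	u32 diff = (u16) (prod - cons);	/* 16-bit math absorbs wrap */

	/* A raw difference of 256 spans 256 indices but only 255
	 * usable entries, so it is clamped to 255 in-flight BDs.
	 */
	if (diff == 256)
		diff = 255;
	return ring_size - diff;
}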
246
247 static u32
248 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
249 {
250 u32 val;
251
252 spin_lock_bh(&bp->indirect_lock);
253 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
254 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
255 spin_unlock_bh(&bp->indirect_lock);
256 return val;
257 }
258
259 static void
260 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
261 {
262 spin_lock_bh(&bp->indirect_lock);
263 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
264 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
265 spin_unlock_bh(&bp->indirect_lock);
266 }
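
/*
 * Editor's note: the two helpers above are the classic PCI register-
 * window idiom -- one write selects the target offset through
 * BNX2_PCICFG_REG_WINDOW_ADDRESS, the next access moves the data
 * through BNX2_PCICFG_REG_WINDOW.  bp->indirect_lock serializes the
 * address/data pair so concurrent callers cannot interleave window
 * selections.  A hedged usage sketch (offset chosen for illustration):
 *
 *	u32 msg = bnx2_reg_rd_ind(bp, bp->shmem_base + BNX2_DRV_MB);
 *	bnx2_reg_wr_ind(bp, bp->shmem_base + BNX2_DRV_MB, msg);
 */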
267
268 static void
269 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
270 {
271 offset += cid_addr;
272 spin_lock_bh(&bp->indirect_lock);
273 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
274 int i;
275
276 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
277 REG_WR(bp, BNX2_CTX_CTX_CTRL,
278 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
279 for (i = 0; i < 5; i++) {
280 u32 val;
281 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
282 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
283 break;
284 udelay(5);
285 }
286 } else {
287 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
288 REG_WR(bp, BNX2_CTX_DATA, val);
289 }
290 spin_unlock_bh(&bp->indirect_lock);
291 }
292
293 static int
294 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
295 {
296 u32 val1;
297 int i, ret;
298
299 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
300 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
301 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
302
303 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
304 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
305
306 udelay(40);
307 }
308
309 val1 = (bp->phy_addr << 21) | (reg << 16) |
310 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
311 BNX2_EMAC_MDIO_COMM_START_BUSY;
312 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
313
314 for (i = 0; i < 50; i++) {
315 udelay(10);
316
317 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
318 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
319 udelay(5);
320
321 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
322 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
323
324 break;
325 }
326 }
327
328 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
329 *val = 0x0;
330 ret = -EBUSY;
331 }
332 else {
333 *val = val1;
334 ret = 0;
335 }
336
337 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
338 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
339 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
340
341 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
342 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
343
344 udelay(40);
345 }
346
347 return ret;
348 }
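
/*
 * Editor's sketch of the MDIO_COMM word composed above; the field
 * positions are read off the expressions in bnx2_read_phy() and
 * bnx2_write_phy(), and the helper itself is illustrative only.
 */
static u32 example_mdio_comm(u32 phy_addr, u32 reg, u32 wr_data)
{
	return (phy_addr << 21) |	/* PHY address, bits 25:21  */
	       (reg << 16) |		/* PHY register, bits 20:16 */
	       (wr_data & 0xffff);	/* write data, bits 15:0    */
	/* The caller then ORs in COMMAND_READ or COMMAND_WRITE plus
	 * START_BUSY (and here DISEXT) and polls the register until
	 * START_BUSY clears.
	 */
}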
349
350 static int
351 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
352 {
353 u32 val1;
354 int i, ret;
355
356 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
357 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
358 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
359
360 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
361 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
362
363 udelay(40);
364 }
365
366 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
367 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
368 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
369 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
370
371 for (i = 0; i < 50; i++) {
372 udelay(10);
373
374 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
375 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
376 udelay(5);
377 break;
378 }
379 }
380
381 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
382 ret = -EBUSY;
383 else
384 ret = 0;
385
386 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
387 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
388 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
389
390 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
391 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
392
393 udelay(40);
394 }
395
396 return ret;
397 }
398
399 static void
400 bnx2_disable_int(struct bnx2 *bp)
401 {
402 int i;
403 struct bnx2_napi *bnapi;
404
405 for (i = 0; i < bp->irq_nvecs; i++) {
406 bnapi = &bp->bnx2_napi[i];
407 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
408 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
409 }
410 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
411 }
412
413 static void
414 bnx2_enable_int(struct bnx2 *bp)
415 {
416 int i;
417 struct bnx2_napi *bnapi;
418
419 for (i = 0; i < bp->irq_nvecs; i++) {
420 bnapi = &bp->bnx2_napi[i];
421
422 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
423 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
424 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
425 bnapi->last_status_idx);
426
427 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
428 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
429 bnapi->last_status_idx);
430 }
431 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
432 }
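
/*
 * Editor's note: the back-to-back writes above appear deliberate --
 * the first INT_ACK_CMD (MASK_INT set) acknowledges events up to
 * last_status_idx while keeping the vector masked, and the second
 * (MASK_INT clear) unmasks it.  The trailing COAL_NOW kick asks the
 * host coalescing block to raise an interrupt immediately if anything
 * is already pending.
 */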
433
434 static void
435 bnx2_disable_int_sync(struct bnx2 *bp)
436 {
437 int i;
438
439 atomic_inc(&bp->intr_sem);
440 bnx2_disable_int(bp);
441 for (i = 0; i < bp->irq_nvecs; i++)
442 synchronize_irq(bp->irq_tbl[i].vector);
443 }
444
445 static void
446 bnx2_napi_disable(struct bnx2 *bp)
447 {
448 int i;
449
450 for (i = 0; i < bp->irq_nvecs; i++)
451 napi_disable(&bp->bnx2_napi[i].napi);
452 }
453
454 static void
455 bnx2_napi_enable(struct bnx2 *bp)
456 {
457 int i;
458
459 for (i = 0; i < bp->irq_nvecs; i++)
460 napi_enable(&bp->bnx2_napi[i].napi);
461 }
462
463 static void
464 bnx2_netif_stop(struct bnx2 *bp)
465 {
466 bnx2_disable_int_sync(bp);
467 if (netif_running(bp->dev)) {
468 bnx2_napi_disable(bp);
469 netif_tx_disable(bp->dev);
470 bp->dev->trans_start = jiffies; /* prevent tx timeout */
471 }
472 }
473
474 static void
475 bnx2_netif_start(struct bnx2 *bp)
476 {
477 if (atomic_dec_and_test(&bp->intr_sem)) {
478 if (netif_running(bp->dev)) {
479 netif_wake_queue(bp->dev);
480 bnx2_napi_enable(bp);
481 bnx2_enable_int(bp);
482 }
483 }
484 }
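
/*
 * Editor's note: bp->intr_sem acts as a disable-depth counter.
 * bnx2_disable_int_sync() increments it before masking interrupts,
 * and bnx2_netif_start() only re-arms NAPI and the hardware when its
 * atomic_dec_and_test() brings the count back to zero, so nested
 * stop/start pairs compose correctly.
 */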
485
486 static void
487 bnx2_free_mem(struct bnx2 *bp)
488 {
489 int i;
490
491 for (i = 0; i < bp->ctx_pages; i++) {
492 if (bp->ctx_blk[i]) {
493 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
494 bp->ctx_blk[i],
495 bp->ctx_blk_mapping[i]);
496 bp->ctx_blk[i] = NULL;
497 }
498 }
499 if (bp->status_blk) {
500 pci_free_consistent(bp->pdev, bp->status_stats_size,
501 bp->status_blk, bp->status_blk_mapping);
502 bp->status_blk = NULL;
503 bp->stats_blk = NULL;
504 }
505 if (bp->tx_desc_ring) {
506 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
507 bp->tx_desc_ring, bp->tx_desc_mapping);
508 bp->tx_desc_ring = NULL;
509 }
510 kfree(bp->tx_buf_ring);
511 bp->tx_buf_ring = NULL;
512 for (i = 0; i < bp->rx_max_ring; i++) {
513 if (bp->rx_desc_ring[i])
514 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
515 bp->rx_desc_ring[i],
516 bp->rx_desc_mapping[i]);
517 bp->rx_desc_ring[i] = NULL;
518 }
519 vfree(bp->rx_buf_ring);
520 bp->rx_buf_ring = NULL;
521 for (i = 0; i < bp->rx_max_pg_ring; i++) {
522 if (bp->rx_pg_desc_ring[i])
523 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
524 bp->rx_pg_desc_ring[i],
525 bp->rx_pg_desc_mapping[i]);
526 bp->rx_pg_desc_ring[i] = NULL;
527 }
528 if (bp->rx_pg_ring)
529 vfree(bp->rx_pg_ring);
530 bp->rx_pg_ring = NULL;
531 }
532
533 static int
534 bnx2_alloc_mem(struct bnx2 *bp)
535 {
536 int i, status_blk_size;
537
538 bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
539 if (bp->tx_buf_ring == NULL)
540 return -ENOMEM;
541
542 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
543 &bp->tx_desc_mapping);
544 if (bp->tx_desc_ring == NULL)
545 goto alloc_mem_err;
546
547 bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
548 if (bp->rx_buf_ring == NULL)
549 goto alloc_mem_err;
550
551 memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);
552
553 for (i = 0; i < bp->rx_max_ring; i++) {
554 bp->rx_desc_ring[i] =
555 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
556 &bp->rx_desc_mapping[i]);
557 if (bp->rx_desc_ring[i] == NULL)
558 goto alloc_mem_err;
559
560 }
561
562 if (bp->rx_pg_ring_size) {
563 bp->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
564 bp->rx_max_pg_ring);
565 if (bp->rx_pg_ring == NULL)
566 goto alloc_mem_err;
567
568 memset(bp->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
569 bp->rx_max_pg_ring);
570 }
571
572 for (i = 0; i < bp->rx_max_pg_ring; i++) {
573 bp->rx_pg_desc_ring[i] =
574 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
575 &bp->rx_pg_desc_mapping[i]);
576 if (bp->rx_pg_desc_ring[i] == NULL)
577 goto alloc_mem_err;
578
579 }
580
581 /* Combine status and statistics blocks into one allocation. */
582 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
583 if (bp->flags & MSIX_CAP_FLAG)
584 status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
585 BNX2_SBLK_MSIX_ALIGN_SIZE);
586 bp->status_stats_size = status_blk_size +
587 sizeof(struct statistics_block);
588
589 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
590 &bp->status_blk_mapping);
591 if (bp->status_blk == NULL)
592 goto alloc_mem_err;
593
594 memset(bp->status_blk, 0, bp->status_stats_size);
595
596 bp->bnx2_napi[0].status_blk = bp->status_blk;
597 if (bp->flags & MSIX_CAP_FLAG) {
598 for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
599 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
600
601 bnapi->status_blk_msix = (void *)
602 ((unsigned long) bp->status_blk +
603 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
604 bnapi->int_num = i << 24;
605 }
606 }
607
608 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
609 status_blk_size);
610
611 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
612
613 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
614 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
615 if (bp->ctx_pages == 0)
616 bp->ctx_pages = 1;
617 for (i = 0; i < bp->ctx_pages; i++) {
618 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
619 BCM_PAGE_SIZE,
620 &bp->ctx_blk_mapping[i]);
621 if (bp->ctx_blk[i] == NULL)
622 goto alloc_mem_err;
623 }
624 }
625 return 0;
626
627 alloc_mem_err:
628 bnx2_free_mem(bp);
629 return -ENOMEM;
630 }
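
/*
 * Editor's worked example for the combined status + statistics
 * allocation above, assuming BNX2_MAX_MSIX_HW_VEC == 9 and
 * BNX2_SBLK_MSIX_ALIGN_SIZE == 128 (values believed to match the
 * bnx2.h of this era; treat them as illustrative):
 *
 *	status_blk_size   = 9 * 128 = 1152 bytes, one cache-aligned
 *			    128-byte slot per MSI-X vector
 *	status_stats_size = 1152 + sizeof(struct statistics_block)
 *
 * The statistics block then lives at status_blk + status_blk_size and
 * shares the single pci_alloc_consistent() mapping, which is why
 * bnx2_free_mem() frees them together.
 */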
631
632 static void
633 bnx2_report_fw_link(struct bnx2 *bp)
634 {
635 u32 fw_link_status = 0;
636
637 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
638 return;
639
640 if (bp->link_up) {
641 u32 bmsr;
642
643 switch (bp->line_speed) {
644 case SPEED_10:
645 if (bp->duplex == DUPLEX_HALF)
646 fw_link_status = BNX2_LINK_STATUS_10HALF;
647 else
648 fw_link_status = BNX2_LINK_STATUS_10FULL;
649 break;
650 case SPEED_100:
651 if (bp->duplex == DUPLEX_HALF)
652 fw_link_status = BNX2_LINK_STATUS_100HALF;
653 else
654 fw_link_status = BNX2_LINK_STATUS_100FULL;
655 break;
656 case SPEED_1000:
657 if (bp->duplex == DUPLEX_HALF)
658 fw_link_status = BNX2_LINK_STATUS_1000HALF;
659 else
660 fw_link_status = BNX2_LINK_STATUS_1000FULL;
661 break;
662 case SPEED_2500:
663 if (bp->duplex == DUPLEX_HALF)
664 fw_link_status = BNX2_LINK_STATUS_2500HALF;
665 else
666 fw_link_status = BNX2_LINK_STATUS_2500FULL;
667 break;
668 }
669
670 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
671
672 if (bp->autoneg) {
673 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
674
675 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
676 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
677
678 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
679 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
680 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
681 else
682 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
683 }
684 }
685 else
686 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
687
688 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
689 }
690
691 static char *
692 bnx2_xceiver_str(struct bnx2 *bp)
693 {
694 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
695 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
696 "Copper"));
697 }
698
699 static void
700 bnx2_report_link(struct bnx2 *bp)
701 {
702 if (bp->link_up) {
703 netif_carrier_on(bp->dev);
704 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
705 bnx2_xceiver_str(bp));
706
707 printk("%d Mbps ", bp->line_speed);
708
709 if (bp->duplex == DUPLEX_FULL)
710 printk("full duplex");
711 else
712 printk("half duplex");
713
714 if (bp->flow_ctrl) {
715 if (bp->flow_ctrl & FLOW_CTRL_RX) {
716 printk(", receive ");
717 if (bp->flow_ctrl & FLOW_CTRL_TX)
718 printk("& transmit ");
719 }
720 else {
721 printk(", transmit ");
722 }
723 printk("flow control ON");
724 }
725 printk("\n");
726 }
727 else {
728 netif_carrier_off(bp->dev);
729 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
730 bnx2_xceiver_str(bp));
731 }
732
733 bnx2_report_fw_link(bp);
734 }
735
736 static void
737 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
738 {
739 u32 local_adv, remote_adv;
740
741 bp->flow_ctrl = 0;
742 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
743 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
744
745 if (bp->duplex == DUPLEX_FULL) {
746 bp->flow_ctrl = bp->req_flow_ctrl;
747 }
748 return;
749 }
750
751 if (bp->duplex != DUPLEX_FULL) {
752 return;
753 }
754
755 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
756 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
757 u32 val;
758
759 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
760 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
761 bp->flow_ctrl |= FLOW_CTRL_TX;
762 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
763 bp->flow_ctrl |= FLOW_CTRL_RX;
764 return;
765 }
766
767 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
768 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
769
770 if (bp->phy_flags & PHY_SERDES_FLAG) {
771 u32 new_local_adv = 0;
772 u32 new_remote_adv = 0;
773
774 if (local_adv & ADVERTISE_1000XPAUSE)
775 new_local_adv |= ADVERTISE_PAUSE_CAP;
776 if (local_adv & ADVERTISE_1000XPSE_ASYM)
777 new_local_adv |= ADVERTISE_PAUSE_ASYM;
778 if (remote_adv & ADVERTISE_1000XPAUSE)
779 new_remote_adv |= ADVERTISE_PAUSE_CAP;
780 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
781 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
782
783 local_adv = new_local_adv;
784 remote_adv = new_remote_adv;
785 }
786
787 /* See Table 28B-3 of 802.3ab-1999 spec. */
788 if (local_adv & ADVERTISE_PAUSE_CAP) {
789 if (local_adv & ADVERTISE_PAUSE_ASYM) {
790 if (remote_adv & ADVERTISE_PAUSE_CAP) {
791 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
792 }
793 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
794 bp->flow_ctrl = FLOW_CTRL_RX;
795 }
796 }
797 else {
798 if (remote_adv & ADVERTISE_PAUSE_CAP) {
799 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
800 }
801 }
802 }
803 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
804 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
805 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
806
807 bp->flow_ctrl = FLOW_CTRL_TX;
808 }
809 }
810 }
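
/*
 * Editor's restatement of the pause resolution above (Table 28B-3,
 * IEEE 802.3 Annex 28B), after the 1000BASE-X bits have been folded
 * into the copper encoding:
 *
 *	local PAUSE  local ASYM | remote PAUSE  remote ASYM | result
 *	     1           x      |      1            x       | TX + RX
 *	     1           1      |      0            1       | RX only
 *	     0           1      |      1            1       | TX only
 *	          anything else                             | none
 */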
811
812 static int
813 bnx2_5709s_linkup(struct bnx2 *bp)
814 {
815 u32 val, speed;
816
817 bp->link_up = 1;
818
819 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
820 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
821 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
822
823 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
824 bp->line_speed = bp->req_line_speed;
825 bp->duplex = bp->req_duplex;
826 return 0;
827 }
828 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
829 switch (speed) {
830 case MII_BNX2_GP_TOP_AN_SPEED_10:
831 bp->line_speed = SPEED_10;
832 break;
833 case MII_BNX2_GP_TOP_AN_SPEED_100:
834 bp->line_speed = SPEED_100;
835 break;
836 case MII_BNX2_GP_TOP_AN_SPEED_1G:
837 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
838 bp->line_speed = SPEED_1000;
839 break;
840 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
841 bp->line_speed = SPEED_2500;
842 break;
843 }
844 if (val & MII_BNX2_GP_TOP_AN_FD)
845 bp->duplex = DUPLEX_FULL;
846 else
847 bp->duplex = DUPLEX_HALF;
848 return 0;
849 }
850
851 static int
852 bnx2_5708s_linkup(struct bnx2 *bp)
853 {
854 u32 val;
855
856 bp->link_up = 1;
857 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
858 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
859 case BCM5708S_1000X_STAT1_SPEED_10:
860 bp->line_speed = SPEED_10;
861 break;
862 case BCM5708S_1000X_STAT1_SPEED_100:
863 bp->line_speed = SPEED_100;
864 break;
865 case BCM5708S_1000X_STAT1_SPEED_1G:
866 bp->line_speed = SPEED_1000;
867 break;
868 case BCM5708S_1000X_STAT1_SPEED_2G5:
869 bp->line_speed = SPEED_2500;
870 break;
871 }
872 if (val & BCM5708S_1000X_STAT1_FD)
873 bp->duplex = DUPLEX_FULL;
874 else
875 bp->duplex = DUPLEX_HALF;
876
877 return 0;
878 }
879
880 static int
881 bnx2_5706s_linkup(struct bnx2 *bp)
882 {
883 u32 bmcr, local_adv, remote_adv, common;
884
885 bp->link_up = 1;
886 bp->line_speed = SPEED_1000;
887
888 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
889 if (bmcr & BMCR_FULLDPLX) {
890 bp->duplex = DUPLEX_FULL;
891 }
892 else {
893 bp->duplex = DUPLEX_HALF;
894 }
895
896 if (!(bmcr & BMCR_ANENABLE)) {
897 return 0;
898 }
899
900 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
901 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
902
903 common = local_adv & remote_adv;
904 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
905
906 if (common & ADVERTISE_1000XFULL) {
907 bp->duplex = DUPLEX_FULL;
908 }
909 else {
910 bp->duplex = DUPLEX_HALF;
911 }
912 }
913
914 return 0;
915 }
916
917 static int
918 bnx2_copper_linkup(struct bnx2 *bp)
919 {
920 u32 bmcr;
921
922 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
923 if (bmcr & BMCR_ANENABLE) {
924 u32 local_adv, remote_adv, common;
925
926 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
927 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
928
929 common = local_adv & (remote_adv >> 2);
930 if (common & ADVERTISE_1000FULL) {
931 bp->line_speed = SPEED_1000;
932 bp->duplex = DUPLEX_FULL;
933 }
934 else if (common & ADVERTISE_1000HALF) {
935 bp->line_speed = SPEED_1000;
936 bp->duplex = DUPLEX_HALF;
937 }
938 else {
939 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
940 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
941
942 common = local_adv & remote_adv;
943 if (common & ADVERTISE_100FULL) {
944 bp->line_speed = SPEED_100;
945 bp->duplex = DUPLEX_FULL;
946 }
947 else if (common & ADVERTISE_100HALF) {
948 bp->line_speed = SPEED_100;
949 bp->duplex = DUPLEX_HALF;
950 }
951 else if (common & ADVERTISE_10FULL) {
952 bp->line_speed = SPEED_10;
953 bp->duplex = DUPLEX_FULL;
954 }
955 else if (common & ADVERTISE_10HALF) {
956 bp->line_speed = SPEED_10;
957 bp->duplex = DUPLEX_HALF;
958 }
959 else {
960 bp->line_speed = 0;
961 bp->link_up = 0;
962 }
963 }
964 }
965 else {
966 if (bmcr & BMCR_SPEED100) {
967 bp->line_speed = SPEED_100;
968 }
969 else {
970 bp->line_speed = SPEED_10;
971 }
972 if (bmcr & BMCR_FULLDPLX) {
973 bp->duplex = DUPLEX_FULL;
974 }
975 else {
976 bp->duplex = DUPLEX_HALF;
977 }
978 }
979
980 return 0;
981 }
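
/*
 * Editor's note on the "remote_adv >> 2" above: in MII_STAT1000 the
 * partner's 1000BASE-T ability bits (LPA_1000FULL = 0x0800,
 * LPA_1000HALF = 0x0400) sit exactly two bit positions above the
 * matching advertisement bits in MII_CTRL1000 (ADVERTISE_1000FULL =
 * 0x0200, ADVERTISE_1000HALF = 0x0100), so shifting the status word
 * right by two lines the registers up for a plain AND.
 */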
982
983 static int
984 bnx2_set_mac_link(struct bnx2 *bp)
985 {
986 u32 val;
987
988 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
989 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
990 (bp->duplex == DUPLEX_HALF)) {
991 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
992 }
993
994 /* Configure the EMAC mode register. */
995 val = REG_RD(bp, BNX2_EMAC_MODE);
996
997 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
998 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
999 BNX2_EMAC_MODE_25G_MODE);
1000
1001 if (bp->link_up) {
1002 switch (bp->line_speed) {
1003 case SPEED_10:
1004 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
1005 val |= BNX2_EMAC_MODE_PORT_MII_10M;
1006 break;
1007 }
1008 /* fall through */
1009 case SPEED_100:
1010 val |= BNX2_EMAC_MODE_PORT_MII;
1011 break;
1012 case SPEED_2500:
1013 val |= BNX2_EMAC_MODE_25G_MODE;
1014 /* fall through */
1015 case SPEED_1000:
1016 val |= BNX2_EMAC_MODE_PORT_GMII;
1017 break;
1018 }
1019 }
1020 else {
1021 val |= BNX2_EMAC_MODE_PORT_GMII;
1022 }
1023
1024 /* Set the MAC to operate in the appropriate duplex mode. */
1025 if (bp->duplex == DUPLEX_HALF)
1026 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1027 REG_WR(bp, BNX2_EMAC_MODE, val);
1028
1029 /* Enable/disable rx PAUSE. */
1030 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1031
1032 if (bp->flow_ctrl & FLOW_CTRL_RX)
1033 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1034 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1035
1036 /* Enable/disable tx PAUSE. */
1037 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
1038 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1039
1040 if (bp->flow_ctrl & FLOW_CTRL_TX)
1041 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1042 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
1043
1044 /* Acknowledge the interrupt. */
1045 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1046
1047 return 0;
1048 }
1049
1050 static void
1051 bnx2_enable_bmsr1(struct bnx2 *bp)
1052 {
1053 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1054 (CHIP_NUM(bp) == CHIP_NUM_5709))
1055 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1056 MII_BNX2_BLK_ADDR_GP_STATUS);
1057 }
1058
1059 static void
1060 bnx2_disable_bmsr1(struct bnx2 *bp)
1061 {
1062 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1063 (CHIP_NUM(bp) == CHIP_NUM_5709))
1064 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1065 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1066 }
1067
1068 static int
1069 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1070 {
1071 u32 up1;
1072 int ret = 1;
1073
1074 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1075 return 0;
1076
1077 if (bp->autoneg & AUTONEG_SPEED)
1078 bp->advertising |= ADVERTISED_2500baseX_Full;
1079
1080 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1081 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1082
1083 bnx2_read_phy(bp, bp->mii_up1, &up1);
1084 if (!(up1 & BCM5708S_UP1_2G5)) {
1085 up1 |= BCM5708S_UP1_2G5;
1086 bnx2_write_phy(bp, bp->mii_up1, up1);
1087 ret = 0;
1088 }
1089
1090 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1091 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1092 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1093
1094 return ret;
1095 }
1096
1097 static int
1098 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1099 {
1100 u32 up1;
1101 int ret = 0;
1102
1103 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1104 return 0;
1105
1106 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1107 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1108
1109 bnx2_read_phy(bp, bp->mii_up1, &up1);
1110 if (up1 & BCM5708S_UP1_2G5) {
1111 up1 &= ~BCM5708S_UP1_2G5;
1112 bnx2_write_phy(bp, bp->mii_up1, up1);
1113 ret = 1;
1114 }
1115
1116 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1117 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1118 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1119
1120 return ret;
1121 }
1122
1123 static void
1124 bnx2_enable_forced_2g5(struct bnx2 *bp)
1125 {
1126 u32 bmcr;
1127
1128 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1129 return;
1130
1131 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1132 u32 val;
1133
1134 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1135 MII_BNX2_BLK_ADDR_SERDES_DIG);
1136 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1137 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1138 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1139 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1140
1141 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1142 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1143 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1144
1145 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1146 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1147 bmcr |= BCM5708S_BMCR_FORCE_2500;
1148 }
1149
1150 if (bp->autoneg & AUTONEG_SPEED) {
1151 bmcr &= ~BMCR_ANENABLE;
1152 if (bp->req_duplex == DUPLEX_FULL)
1153 bmcr |= BMCR_FULLDPLX;
1154 }
1155 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1156 }
1157
1158 static void
1159 bnx2_disable_forced_2g5(struct bnx2 *bp)
1160 {
1161 u32 bmcr;
1162
1163 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1164 return;
1165
1166 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1167 u32 val;
1168
1169 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1170 MII_BNX2_BLK_ADDR_SERDES_DIG);
1171 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1172 val &= ~MII_BNX2_SD_MISC1_FORCE;
1173 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1174
1175 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1176 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1177 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1178
1179 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1180 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1181 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1182 }
1183
1184 if (bp->autoneg & AUTONEG_SPEED)
1185 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1186 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1187 }
1188
1189 static int
1190 bnx2_set_link(struct bnx2 *bp)
1191 {
1192 u32 bmsr;
1193 u8 link_up;
1194
1195 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1196 bp->link_up = 1;
1197 return 0;
1198 }
1199
1200 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1201 return 0;
1202
1203 link_up = bp->link_up;
1204
1205 bnx2_enable_bmsr1(bp);
1206 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1207 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1208 bnx2_disable_bmsr1(bp);
1209
1210 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1211 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1212 u32 val;
1213
1214 val = REG_RD(bp, BNX2_EMAC_STATUS);
1215 if (val & BNX2_EMAC_STATUS_LINK)
1216 bmsr |= BMSR_LSTATUS;
1217 else
1218 bmsr &= ~BMSR_LSTATUS;
1219 }
1220
1221 if (bmsr & BMSR_LSTATUS) {
1222 bp->link_up = 1;
1223
1224 if (bp->phy_flags & PHY_SERDES_FLAG) {
1225 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1226 bnx2_5706s_linkup(bp);
1227 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1228 bnx2_5708s_linkup(bp);
1229 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1230 bnx2_5709s_linkup(bp);
1231 }
1232 else {
1233 bnx2_copper_linkup(bp);
1234 }
1235 bnx2_resolve_flow_ctrl(bp);
1236 }
1237 else {
1238 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1239 (bp->autoneg & AUTONEG_SPEED))
1240 bnx2_disable_forced_2g5(bp);
1241
1242 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1243 bp->link_up = 0;
1244 }
1245
1246 if (bp->link_up != link_up) {
1247 bnx2_report_link(bp);
1248 }
1249
1250 bnx2_set_mac_link(bp);
1251
1252 return 0;
1253 }
1254
1255 static int
1256 bnx2_reset_phy(struct bnx2 *bp)
1257 {
1258 int i;
1259 u32 reg;
1260
1261 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1262
1263 #define PHY_RESET_MAX_WAIT 100
1264 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1265 udelay(10);
1266
1267 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1268 if (!(reg & BMCR_RESET)) {
1269 udelay(20);
1270 break;
1271 }
1272 }
1273 if (i == PHY_RESET_MAX_WAIT) {
1274 return -EBUSY;
1275 }
1276 return 0;
1277 }
1278
1279 static u32
1280 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1281 {
1282 u32 adv = 0;
1283
1284 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1285 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1286
1287 if (bp->phy_flags & PHY_SERDES_FLAG) {
1288 adv = ADVERTISE_1000XPAUSE;
1289 }
1290 else {
1291 adv = ADVERTISE_PAUSE_CAP;
1292 }
1293 }
1294 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1295 if (bp->phy_flags & PHY_SERDES_FLAG) {
1296 adv = ADVERTISE_1000XPSE_ASYM;
1297 }
1298 else {
1299 adv = ADVERTISE_PAUSE_ASYM;
1300 }
1301 }
1302 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1303 if (bp->phy_flags & PHY_SERDES_FLAG) {
1304 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1305 }
1306 else {
1307 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1308 }
1309 }
1310 return adv;
1311 }
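
/*
 * Editor's summary of the mapping above; the copper and 1000BASE-X
 * encodings advertise the same intent with different bit names:
 *
 *	requested	fiber (1000BASE-X)		copper
 *	RX + TX		1000XPAUSE			PAUSE_CAP
 *	TX only		1000XPSE_ASYM			PAUSE_ASYM
 *	RX only		1000XPAUSE | 1000XPSE_ASYM	PAUSE_CAP | PAUSE_ASYM
 *
 * RX-only must advertise both bits because, per Annex 28B, receive-
 * side-only pause is expressed as "symmetric or asymmetric toward me".
 */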
1312
1313 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1314
1315 static int
1316 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1317 {
1318 u32 speed_arg = 0, pause_adv;
1319
1320 pause_adv = bnx2_phy_get_pause_adv(bp);
1321
1322 if (bp->autoneg & AUTONEG_SPEED) {
1323 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1324 if (bp->advertising & ADVERTISED_10baseT_Half)
1325 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1326 if (bp->advertising & ADVERTISED_10baseT_Full)
1327 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1328 if (bp->advertising & ADVERTISED_100baseT_Half)
1329 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1330 if (bp->advertising & ADVERTISED_100baseT_Full)
1331 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1332 if (bp->advertising & ADVERTISED_1000baseT_Full)
1333 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1334 if (bp->advertising & ADVERTISED_2500baseX_Full)
1335 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1336 } else {
1337 if (bp->req_line_speed == SPEED_2500)
1338 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1339 else if (bp->req_line_speed == SPEED_1000)
1340 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1341 else if (bp->req_line_speed == SPEED_100) {
1342 if (bp->req_duplex == DUPLEX_FULL)
1343 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1344 else
1345 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1346 } else if (bp->req_line_speed == SPEED_10) {
1347 if (bp->req_duplex == DUPLEX_FULL)
1348 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1349 else
1350 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1351 }
1352 }
1353
1354 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1355 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1356 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1357 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1358
1359 if (port == PORT_TP)
1360 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1361 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1362
1363 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1364
1365 spin_unlock_bh(&bp->phy_lock);
1366 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1367 spin_lock_bh(&bp->phy_lock);
1368
1369 return 0;
1370 }
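
/*
 * Editor's example of a speed_arg word as built above: autoneg with
 * 100FULL and 1GFULL advertised plus symmetric pause would OR together
 *
 *	BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG |
 *	BNX2_NETLINK_SET_LINK_SPEED_100FULL  |
 *	BNX2_NETLINK_SET_LINK_SPEED_1GFULL   |
 *	BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE
 *
 * before the word is posted to firmware through the shared-memory
 * argument mailbox (BNX2_DRV_MB_ARG0) and kicked with
 * BNX2_DRV_MSG_CODE_CMD_SET_LINK via bnx2_fw_sync().
 */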
1371
1372 static int
1373 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1374 {
1375 u32 adv, bmcr;
1376 u32 new_adv = 0;
1377
1378 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1379 return (bnx2_setup_remote_phy(bp, port));
1380
1381 if (!(bp->autoneg & AUTONEG_SPEED)) {
1382 u32 new_bmcr;
1383 int force_link_down = 0;
1384
1385 if (bp->req_line_speed == SPEED_2500) {
1386 if (!bnx2_test_and_enable_2g5(bp))
1387 force_link_down = 1;
1388 } else if (bp->req_line_speed == SPEED_1000) {
1389 if (bnx2_test_and_disable_2g5(bp))
1390 force_link_down = 1;
1391 }
1392 bnx2_read_phy(bp, bp->mii_adv, &adv);
1393 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1394
1395 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1396 new_bmcr = bmcr & ~BMCR_ANENABLE;
1397 new_bmcr |= BMCR_SPEED1000;
1398
1399 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1400 if (bp->req_line_speed == SPEED_2500)
1401 bnx2_enable_forced_2g5(bp);
1402 else if (bp->req_line_speed == SPEED_1000) {
1403 bnx2_disable_forced_2g5(bp);
1404 new_bmcr &= ~0x2000;
1405 }
1406
1407 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1408 if (bp->req_line_speed == SPEED_2500)
1409 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1410 else
1411 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1412 }
1413
1414 if (bp->req_duplex == DUPLEX_FULL) {
1415 adv |= ADVERTISE_1000XFULL;
1416 new_bmcr |= BMCR_FULLDPLX;
1417 }
1418 else {
1419 adv |= ADVERTISE_1000XHALF;
1420 new_bmcr &= ~BMCR_FULLDPLX;
1421 }
1422 if ((new_bmcr != bmcr) || (force_link_down)) {
1423 /* Force a link down visible on the other side */
1424 if (bp->link_up) {
1425 bnx2_write_phy(bp, bp->mii_adv, adv &
1426 ~(ADVERTISE_1000XFULL |
1427 ADVERTISE_1000XHALF));
1428 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1429 BMCR_ANRESTART | BMCR_ANENABLE);
1430
1431 bp->link_up = 0;
1432 netif_carrier_off(bp->dev);
1433 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1434 bnx2_report_link(bp);
1435 }
1436 bnx2_write_phy(bp, bp->mii_adv, adv);
1437 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1438 } else {
1439 bnx2_resolve_flow_ctrl(bp);
1440 bnx2_set_mac_link(bp);
1441 }
1442 return 0;
1443 }
1444
1445 bnx2_test_and_enable_2g5(bp);
1446
1447 if (bp->advertising & ADVERTISED_1000baseT_Full)
1448 new_adv |= ADVERTISE_1000XFULL;
1449
1450 new_adv |= bnx2_phy_get_pause_adv(bp);
1451
1452 bnx2_read_phy(bp, bp->mii_adv, &adv);
1453 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1454
1455 bp->serdes_an_pending = 0;
1456 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1457 /* Force a link down visible on the other side */
1458 if (bp->link_up) {
1459 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1460 spin_unlock_bh(&bp->phy_lock);
1461 msleep(20);
1462 spin_lock_bh(&bp->phy_lock);
1463 }
1464
1465 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1466 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1467 BMCR_ANENABLE);
1468 /* Speed up link-up time when the link partner
1469 * does not autonegotiate, which is very common
1470 * in blade servers. Some blade servers use
1471 * IPMI for keyboard input and it's important
1472 * to minimize link disruptions. Autoneg. involves
1473 * exchanging base pages plus 3 next pages and
1474 * normally completes in about 120 msec.
1475 */
1476 bp->current_interval = SERDES_AN_TIMEOUT;
1477 bp->serdes_an_pending = 1;
1478 mod_timer(&bp->timer, jiffies + bp->current_interval);
1479 } else {
1480 bnx2_resolve_flow_ctrl(bp);
1481 bnx2_set_mac_link(bp);
1482 }
1483
1484 return 0;
1485 }
1486
1487 #define ETHTOOL_ALL_FIBRE_SPEED \
1488 (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ? \
1489 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1490 (ADVERTISED_1000baseT_Full)
1491
1492 #define ETHTOOL_ALL_COPPER_SPEED \
1493 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1494 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1495 ADVERTISED_1000baseT_Full)
1496
1497 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1498 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1499
1500 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1501
1502 static void
1503 bnx2_set_default_remote_link(struct bnx2 *bp)
1504 {
1505 u32 link;
1506
1507 if (bp->phy_port == PORT_TP)
1508 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
1509 else
1510 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);
1511
1512 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1513 bp->req_line_speed = 0;
1514 bp->autoneg |= AUTONEG_SPEED;
1515 bp->advertising = ADVERTISED_Autoneg;
1516 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1517 bp->advertising |= ADVERTISED_10baseT_Half;
1518 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1519 bp->advertising |= ADVERTISED_10baseT_Full;
1520 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1521 bp->advertising |= ADVERTISED_100baseT_Half;
1522 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1523 bp->advertising |= ADVERTISED_100baseT_Full;
1524 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1525 bp->advertising |= ADVERTISED_1000baseT_Full;
1526 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1527 bp->advertising |= ADVERTISED_2500baseX_Full;
1528 } else {
1529 bp->autoneg = 0;
1530 bp->advertising = 0;
1531 bp->req_duplex = DUPLEX_FULL;
1532 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1533 bp->req_line_speed = SPEED_10;
1534 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1535 bp->req_duplex = DUPLEX_HALF;
1536 }
1537 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1538 bp->req_line_speed = SPEED_100;
1539 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1540 bp->req_duplex = DUPLEX_HALF;
1541 }
1542 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1543 bp->req_line_speed = SPEED_1000;
1544 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1545 bp->req_line_speed = SPEED_2500;
1546 }
1547 }
1548
1549 static void
1550 bnx2_set_default_link(struct bnx2 *bp)
1551 {
1552 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1553 return bnx2_set_default_remote_link(bp);
1554
1555 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1556 bp->req_line_speed = 0;
1557 if (bp->phy_flags & PHY_SERDES_FLAG) {
1558 u32 reg;
1559
1560 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1561
1562 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1563 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1564 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1565 bp->autoneg = 0;
1566 bp->req_line_speed = bp->line_speed = SPEED_1000;
1567 bp->req_duplex = DUPLEX_FULL;
1568 }
1569 } else
1570 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1571 }
1572
1573 static void
1574 bnx2_send_heart_beat(struct bnx2 *bp)
1575 {
1576 u32 msg;
1577 u32 addr;
1578
1579 spin_lock(&bp->indirect_lock);
1580 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1581 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1582 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1583 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1584 spin_unlock(&bp->indirect_lock);
1585 }
1586
1587 static void
1588 bnx2_remote_phy_event(struct bnx2 *bp)
1589 {
1590 u32 msg;
1591 u8 link_up = bp->link_up;
1592 u8 old_port;
1593
1594 msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
1595
1596 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1597 bnx2_send_heart_beat(bp);
1598
1599 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1600
1601 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1602 bp->link_up = 0;
1603 else {
1604 u32 speed;
1605
1606 bp->link_up = 1;
1607 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1608 bp->duplex = DUPLEX_FULL;
1609 switch (speed) {
1610 case BNX2_LINK_STATUS_10HALF:
1611 bp->duplex = DUPLEX_HALF;	/* fall through */
1612 case BNX2_LINK_STATUS_10FULL:
1613 bp->line_speed = SPEED_10;
1614 break;
1615 case BNX2_LINK_STATUS_100HALF:
1616 bp->duplex = DUPLEX_HALF;	/* fall through */
1617 case BNX2_LINK_STATUS_100BASE_T4:
1618 case BNX2_LINK_STATUS_100FULL:
1619 bp->line_speed = SPEED_100;
1620 break;
1621 case BNX2_LINK_STATUS_1000HALF:
1622 bp->duplex = DUPLEX_HALF;	/* fall through */
1623 case BNX2_LINK_STATUS_1000FULL:
1624 bp->line_speed = SPEED_1000;
1625 break;
1626 case BNX2_LINK_STATUS_2500HALF:
1627 bp->duplex = DUPLEX_HALF;	/* fall through */
1628 case BNX2_LINK_STATUS_2500FULL:
1629 bp->line_speed = SPEED_2500;
1630 break;
1631 default:
1632 bp->line_speed = 0;
1633 break;
1634 }
1635
1636 spin_lock(&bp->phy_lock);
1637 bp->flow_ctrl = 0;
1638 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1639 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1640 if (bp->duplex == DUPLEX_FULL)
1641 bp->flow_ctrl = bp->req_flow_ctrl;
1642 } else {
1643 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
1644 bp->flow_ctrl |= FLOW_CTRL_TX;
1645 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
1646 bp->flow_ctrl |= FLOW_CTRL_RX;
1647 }
1648
1649 old_port = bp->phy_port;
1650 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
1651 bp->phy_port = PORT_FIBRE;
1652 else
1653 bp->phy_port = PORT_TP;
1654
1655 if (old_port != bp->phy_port)
1656 bnx2_set_default_link(bp);
1657
1658 spin_unlock(&bp->phy_lock);
1659 }
1660 if (bp->link_up != link_up)
1661 bnx2_report_link(bp);
1662
1663 bnx2_set_mac_link(bp);
1664 }
1665
1666 static int
1667 bnx2_set_remote_link(struct bnx2 *bp)
1668 {
1669 u32 evt_code;
1670
1671 evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1672 switch (evt_code) {
1673 case BNX2_FW_EVT_CODE_LINK_EVENT:
1674 bnx2_remote_phy_event(bp);
1675 break;
1676 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1677 default:
1678 bnx2_send_heart_beat(bp);
1679 break;
1680 }
1681 return 0;
1682 }
1683
1684 static int
1685 bnx2_setup_copper_phy(struct bnx2 *bp)
1686 {
1687 u32 bmcr;
1688 u32 new_bmcr;
1689
1690 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1691
1692 if (bp->autoneg & AUTONEG_SPEED) {
1693 u32 adv_reg, adv1000_reg;
1694 u32 new_adv_reg = 0;
1695 u32 new_adv1000_reg = 0;
1696
1697 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
1698 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1699 ADVERTISE_PAUSE_ASYM);
1700
1701 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1702 adv1000_reg &= PHY_ALL_1000_SPEED;
1703
1704 if (bp->advertising & ADVERTISED_10baseT_Half)
1705 new_adv_reg |= ADVERTISE_10HALF;
1706 if (bp->advertising & ADVERTISED_10baseT_Full)
1707 new_adv_reg |= ADVERTISE_10FULL;
1708 if (bp->advertising & ADVERTISED_100baseT_Half)
1709 new_adv_reg |= ADVERTISE_100HALF;
1710 if (bp->advertising & ADVERTISED_100baseT_Full)
1711 new_adv_reg |= ADVERTISE_100FULL;
1712 if (bp->advertising & ADVERTISED_1000baseT_Full)
1713 new_adv1000_reg |= ADVERTISE_1000FULL;
1714
1715 new_adv_reg |= ADVERTISE_CSMA;
1716
1717 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1718
1719 if ((adv1000_reg != new_adv1000_reg) ||
1720 (adv_reg != new_adv_reg) ||
1721 ((bmcr & BMCR_ANENABLE) == 0)) {
1722
1723 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
1724 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1725 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
1726 BMCR_ANENABLE);
1727 }
1728 else if (bp->link_up) {
1729 /* Flow ctrl may have changed from auto to forced */
1730 /* or vice-versa. */
1731
1732 bnx2_resolve_flow_ctrl(bp);
1733 bnx2_set_mac_link(bp);
1734 }
1735 return 0;
1736 }
1737
1738 new_bmcr = 0;
1739 if (bp->req_line_speed == SPEED_100) {
1740 new_bmcr |= BMCR_SPEED100;
1741 }
1742 if (bp->req_duplex == DUPLEX_FULL) {
1743 new_bmcr |= BMCR_FULLDPLX;
1744 }
1745 if (new_bmcr != bmcr) {
1746 u32 bmsr;
1747
1748 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1749 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1750
1751 if (bmsr & BMSR_LSTATUS) {
1752 /* Force link down */
1753 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1754 spin_unlock_bh(&bp->phy_lock);
1755 msleep(50);
1756 spin_lock_bh(&bp->phy_lock);
1757
1758 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1759 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1760 }
1761
1762 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1763
1764 /* Normally, the new speed is setup after the link has
1765 * gone down and up again. In some cases, link will not go
1766 * down so we need to set up the new speed here.
1767 */
1768 if (bmsr & BMSR_LSTATUS) {
1769 bp->line_speed = bp->req_line_speed;
1770 bp->duplex = bp->req_duplex;
1771 bnx2_resolve_flow_ctrl(bp);
1772 bnx2_set_mac_link(bp);
1773 }
1774 } else {
1775 bnx2_resolve_flow_ctrl(bp);
1776 bnx2_set_mac_link(bp);
1777 }
1778 return 0;
1779 }
1780
1781 static int
1782 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1783 {
1784 if (bp->loopback == MAC_LOOPBACK)
1785 return 0;
1786
1787 if (bp->phy_flags & PHY_SERDES_FLAG) {
1788 return (bnx2_setup_serdes_phy(bp, port));
1789 }
1790 else {
1791 return (bnx2_setup_copper_phy(bp));
1792 }
1793 }
1794
1795 static int
1796 bnx2_init_5709s_phy(struct bnx2 *bp)
1797 {
1798 u32 val;
1799
1800 bp->mii_bmcr = MII_BMCR + 0x10;
1801 bp->mii_bmsr = MII_BMSR + 0x10;
1802 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1803 bp->mii_adv = MII_ADVERTISE + 0x10;
1804 bp->mii_lpa = MII_LPA + 0x10;
1805 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1806
1807 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1808 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1809
1810 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1811 bnx2_reset_phy(bp);
1812
1813 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1814
1815 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1816 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1817 val |= MII_BNX2_SD_1000XCTL1_FIBER;
1818 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
1819
1820 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1821 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
1822 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
1823 val |= BCM5708S_UP1_2G5;
1824 else
1825 val &= ~BCM5708S_UP1_2G5;
1826 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
1827
1828 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
1829 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
1830 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
1831 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
1832
1833 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
1834
1835 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
1836 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
1837 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
1838
1839 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1840
1841 return 0;
1842 }
1843
1844 static int
1845 bnx2_init_5708s_phy(struct bnx2 *bp)
1846 {
1847 u32 val;
1848
1849 bnx2_reset_phy(bp);
1850
1851 bp->mii_up1 = BCM5708S_UP1;
1852
1853 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1854 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1855 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1856
1857 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1858 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1859 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1860
1861 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1862 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1863 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1864
1865 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1866 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1867 val |= BCM5708S_UP1_2G5;
1868 bnx2_write_phy(bp, BCM5708S_UP1, val);
1869 }
1870
1871 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1872 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1873 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1874 /* increase tx signal amplitude */
1875 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1876 BCM5708S_BLK_ADDR_TX_MISC);
1877 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1878 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1879 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1880 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1881 }
1882
1883 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
1884 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1885
1886 if (val) {
1887 u32 is_backplane;
1888
1889 is_backplane = REG_RD_IND(bp, bp->shmem_base +
1890 BNX2_SHARED_HW_CFG_CONFIG);
1891 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1892 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1893 BCM5708S_BLK_ADDR_TX_MISC);
1894 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1895 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1896 BCM5708S_BLK_ADDR_DIG);
1897 }
1898 }
1899 return 0;
1900 }
1901
1902 static int
1903 bnx2_init_5706s_phy(struct bnx2 *bp)
1904 {
1905 bnx2_reset_phy(bp);
1906
1907 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1908
1909 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1910 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
1911
1912 if (bp->dev->mtu > 1500) {
1913 u32 val;
1914
1915 /* Set extended packet length bit */
1916 bnx2_write_phy(bp, 0x18, 0x7);
1917 bnx2_read_phy(bp, 0x18, &val);
1918 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1919
1920 bnx2_write_phy(bp, 0x1c, 0x6c00);
1921 bnx2_read_phy(bp, 0x1c, &val);
1922 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1923 }
1924 else {
1925 u32 val;
1926
1927 bnx2_write_phy(bp, 0x18, 0x7);
1928 bnx2_read_phy(bp, 0x18, &val);
1929 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1930
1931 bnx2_write_phy(bp, 0x1c, 0x6c00);
1932 bnx2_read_phy(bp, 0x1c, &val);
1933 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1934 }
1935
1936 return 0;
1937 }
1938
1939 static int
1940 bnx2_init_copper_phy(struct bnx2 *bp)
1941 {
1942 u32 val;
1943
1944 bnx2_reset_phy(bp);
1945
1946 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1947 bnx2_write_phy(bp, 0x18, 0x0c00);
1948 bnx2_write_phy(bp, 0x17, 0x000a);
1949 bnx2_write_phy(bp, 0x15, 0x310b);
1950 bnx2_write_phy(bp, 0x17, 0x201f);
1951 bnx2_write_phy(bp, 0x15, 0x9506);
1952 bnx2_write_phy(bp, 0x17, 0x401f);
1953 bnx2_write_phy(bp, 0x15, 0x14e2);
1954 bnx2_write_phy(bp, 0x18, 0x0400);
1955 }
1956
1957 if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
1958 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
1959 MII_BNX2_DSP_EXPAND_REG | 0x8);
1960 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1961 val &= ~(1 << 8);
1962 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
1963 }
1964
1965 if (bp->dev->mtu > 1500) {
1966 /* Set extended packet length bit */
1967 bnx2_write_phy(bp, 0x18, 0x7);
1968 bnx2_read_phy(bp, 0x18, &val);
1969 bnx2_write_phy(bp, 0x18, val | 0x4000);
1970
1971 bnx2_read_phy(bp, 0x10, &val);
1972 bnx2_write_phy(bp, 0x10, val | 0x1);
1973 }
1974 else {
1975 bnx2_write_phy(bp, 0x18, 0x7);
1976 bnx2_read_phy(bp, 0x18, &val);
1977 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1978
1979 bnx2_read_phy(bp, 0x10, &val);
1980 bnx2_write_phy(bp, 0x10, val & ~0x1);
1981 }
1982
1983 /* ethernet@wirespeed */
1984 bnx2_write_phy(bp, 0x18, 0x7007);
1985 bnx2_read_phy(bp, 0x18, &val);
1986 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1987 return 0;
1988 }
1989
1990
1991 static int
1992 bnx2_init_phy(struct bnx2 *bp)
1993 {
1994 u32 val;
1995 int rc = 0;
1996
1997 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1998 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1999
2000 bp->mii_bmcr = MII_BMCR;
2001 bp->mii_bmsr = MII_BMSR;
2002 bp->mii_bmsr1 = MII_BMSR;
2003 bp->mii_adv = MII_ADVERTISE;
2004 bp->mii_lpa = MII_LPA;
2005
2006 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2007
2008 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
2009 goto setup_phy;
2010
2011 bnx2_read_phy(bp, MII_PHYSID1, &val);
2012 bp->phy_id = val << 16;
2013 bnx2_read_phy(bp, MII_PHYSID2, &val);
2014 bp->phy_id |= val & 0xffff;
2015
2016 if (bp->phy_flags & PHY_SERDES_FLAG) {
2017 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2018 rc = bnx2_init_5706s_phy(bp);
2019 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2020 rc = bnx2_init_5708s_phy(bp);
2021 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2022 rc = bnx2_init_5709s_phy(bp);
2023 }
2024 else {
2025 rc = bnx2_init_copper_phy(bp);
2026 }
2027
2028 setup_phy:
2029 if (!rc)
2030 rc = bnx2_setup_phy(bp, bp->phy_port);
2031
2032 return rc;
2033 }
2034
2035 static int
2036 bnx2_set_mac_loopback(struct bnx2 *bp)
2037 {
2038 u32 mac_mode;
2039
2040 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2041 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2042 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2043 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2044 bp->link_up = 1;
2045 return 0;
2046 }
2047
2048 static int bnx2_test_link(struct bnx2 *);
2049
2050 static int
2051 bnx2_set_phy_loopback(struct bnx2 *bp)
2052 {
2053 u32 mac_mode;
2054 int rc, i;
2055
2056 spin_lock_bh(&bp->phy_lock);
2057 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2058 BMCR_SPEED1000);
2059 spin_unlock_bh(&bp->phy_lock);
2060 if (rc)
2061 return rc;
2062
2063 for (i = 0; i < 10; i++) {
2064 if (bnx2_test_link(bp) == 0)
2065 break;
2066 msleep(100);
2067 }
2068
2069 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2070 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2071 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2072 BNX2_EMAC_MODE_25G_MODE);
2073
2074 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2075 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2076 bp->link_up = 1;
2077 return 0;
2078 }
2079
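/* Driver/firmware mailbox handshake, as this code uses it: the driver
 * writes a message (with a rolling sequence number in the low bits) to
 * BNX2_DRV_MB in shared memory, then polls BNX2_FW_MB until the firmware
 * echoes the same sequence number back.  A sketch of the message layout
 * implied by the masks below (field widths are inferred from usage, not
 * documented here):
 *
 *	msg_data = BNX2_DRV_MSG_CODE_xxx |	(command code)
 *		   BNX2_DRV_MSG_DATA_WAITn |	(wait-state data)
 *		   bp->fw_wr_seq;		(sequence, BNX2_DRV_MSG_SEQ)
 */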
2080 static int
2081 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
2082 {
2083 int i;
2084 u32 val;
2085
2086 bp->fw_wr_seq++;
2087 msg_data |= bp->fw_wr_seq;
2088
2089 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
2090
2091 /* wait for an acknowledgement. */
2092 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
2093 msleep(10);
2094
2095 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
2096
2097 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2098 break;
2099 }
2100 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2101 return 0;
2102
2103 /* If we timed out, inform the firmware that this is the case. */
2104 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2105 if (!silent)
2106 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2107 "%x\n", msg_data);
2108
2109 msg_data &= ~BNX2_DRV_MSG_CODE;
2110 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2111
2112 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
2113
2114 return -EBUSY;
2115 }
2116
2117 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2118 return -EIO;
2119
2120 return 0;
2121 }
2122
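/* On the 5709, context memory lives in host DRAM and the chip reaches it
 * through a page table.  The loop below appears to program one page-table
 * entry per context page: the low 32 bits of the DMA address (plus a
 * VALID bit) go into DATA0, the high 32 bits into DATA1, and writing the
 * entry index with WRITE_REQ into HOST_PAGE_TBL_CTRL commits the entry;
 * that bit is then polled until the hardware clears it.
 */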
2123 static int
2124 bnx2_init_5709_context(struct bnx2 *bp)
2125 {
2126 int i, ret = 0;
2127 u32 val;
2128
2129 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2130 val |= (BCM_PAGE_BITS - 8) << 16;
2131 REG_WR(bp, BNX2_CTX_COMMAND, val);
2132 for (i = 0; i < 10; i++) {
2133 val = REG_RD(bp, BNX2_CTX_COMMAND);
2134 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2135 break;
2136 udelay(2);
2137 }
2138 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2139 return -EBUSY;
2140
2141 for (i = 0; i < bp->ctx_pages; i++) {
2142 int j;
2143
2144 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2145 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2146 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2147 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2148 (u64) bp->ctx_blk_mapping[i] >> 32);
2149 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2150 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2151 for (j = 0; j < 10; j++) {
2152
2153 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2154 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2155 break;
2156 udelay(5);
2157 }
2158 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2159 ret = -EBUSY;
2160 break;
2161 }
2162 }
2163 return ret;
2164 }
2165
2166 static void
2167 bnx2_init_context(struct bnx2 *bp)
2168 {
2169 u32 vcid;
2170
2171 vcid = 96;
2172 while (vcid) {
2173 u32 vcid_addr, pcid_addr, offset;
2174 int i;
2175
2176 vcid--;
2177
2178 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2179 u32 new_vcid;
2180
2181 vcid_addr = GET_PCID_ADDR(vcid);
2182 if (vcid & 0x8) {
2183 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2184 }
2185 else {
2186 new_vcid = vcid;
2187 }
2188 pcid_addr = GET_PCID_ADDR(new_vcid);
2189 }
2190 else {
2191 vcid_addr = GET_CID_ADDR(vcid);
2192 pcid_addr = vcid_addr;
2193 }
2194
2195 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2196 vcid_addr += (i << PHY_CTX_SHIFT);
2197 pcid_addr += (i << PHY_CTX_SHIFT);
2198
2199 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2200 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2201
2202 /* Zero out the context. */
2203 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2204 CTX_WR(bp, vcid_addr, offset, 0);
2205 }
2206 }
2207 }
2208
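/* Workaround for chips with defective internal rx buffer (mbuf) memory:
 * drain the firmware's free-buffer pool by allocating every buffer,
 * remember the handles that look good, and then free only those back.
 * The bad buffers (bit 9 set in the returned handle) are simply never
 * returned, so the firmware can no longer hand them out.  The free
 * encoding used below, (val << 9) | val | 1, appears to place the handle
 * in both the high and low fields of the free register with bit 0 as a
 * valid flag; this is an inference from the code, not from documentation.
 */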
2209 static int
2210 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2211 {
2212 u16 *good_mbuf;
2213 u32 good_mbuf_cnt;
2214 u32 val;
2215
2216 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2217 if (good_mbuf == NULL) {
2218 printk(KERN_ERR PFX "Failed to allocate memory in "
2219 "bnx2_alloc_bad_rbuf\n");
2220 return -ENOMEM;
2221 }
2222
2223 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2224 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2225
2226 good_mbuf_cnt = 0;
2227
2228 /* Allocate a bunch of mbufs and save the good ones in an array. */
2229 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2230 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2231 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
2232
2233 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
2234
2235 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2236
2237 /* The addresses with bit 9 set are bad memory blocks. */
2238 if (!(val & (1 << 9))) {
2239 good_mbuf[good_mbuf_cnt] = (u16) val;
2240 good_mbuf_cnt++;
2241 }
2242
2243 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2244 }
2245
2246 /* Free the good ones back to the mbuf pool, thus discarding
2247 * all the bad ones. */
2248 while (good_mbuf_cnt) {
2249 good_mbuf_cnt--;
2250
2251 val = good_mbuf[good_mbuf_cnt];
2252 val = (val << 9) | val | 1;
2253
2254 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
2255 }
2256 kfree(good_mbuf);
2257 return 0;
2258 }
2259
2260 static void
2261 bnx2_set_mac_addr(struct bnx2 *bp)
2262 {
2263 u32 val;
2264 u8 *mac_addr = bp->dev->dev_addr;
2265
2266 val = (mac_addr[0] << 8) | mac_addr[1];
2267
2268 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2269
2270 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2271 (mac_addr[4] << 8) | mac_addr[5];
2272
2273 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2274 }
2275
2276 static inline int
2277 bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
2278 {
2279 dma_addr_t mapping;
2280 struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2281 struct rx_bd *rxbd =
2282 &bp->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2283 struct page *page = alloc_page(GFP_ATOMIC);
2284
2285 if (!page)
2286 return -ENOMEM;
2287 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2288 PCI_DMA_FROMDEVICE);
2289 rx_pg->page = page;
2290 pci_unmap_addr_set(rx_pg, mapping, mapping);
2291 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2292 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2293 return 0;
2294 }
2295
2296 static void
2297 bnx2_free_rx_page(struct bnx2 *bp, u16 index)
2298 {
2299 struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2300 struct page *page = rx_pg->page;
2301
2302 if (!page)
2303 return;
2304
2305 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2306 PCI_DMA_FROMDEVICE);
2307
2308 __free_page(page);
2309 rx_pg->page = NULL;
2310 }
2311
2312 static inline int
2313 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, u16 index)
2314 {
2315 struct sk_buff *skb;
2316 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
2317 dma_addr_t mapping;
2318 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2319 unsigned long align;
2320
2321 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2322 if (skb == NULL) {
2323 return -ENOMEM;
2324 }
2325
2326 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2327 skb_reserve(skb, BNX2_RX_ALIGN - align);
2328
2329 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2330 PCI_DMA_FROMDEVICE);
2331
2332 rx_buf->skb = skb;
2333 pci_unmap_addr_set(rx_buf, mapping, mapping);
2334
2335 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2336 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2337
2338 bnapi->rx_prod_bseq += bp->rx_buf_use_size;
2339
2340 return 0;
2341 }
2342
2343 static int
2344 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2345 {
2346 struct status_block *sblk = bnapi->status_blk;
2347 u32 new_link_state, old_link_state;
2348 int is_set = 1;
2349
2350 new_link_state = sblk->status_attn_bits & event;
2351 old_link_state = sblk->status_attn_bits_ack & event;
2352 if (new_link_state != old_link_state) {
2353 if (new_link_state)
2354 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2355 else
2356 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2357 } else
2358 is_set = 0;
2359
2360 return is_set;
2361 }
2362
2363 static void
2364 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2365 {
2366 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE)) {
2367 spin_lock(&bp->phy_lock);
2368 bnx2_set_link(bp);
2369 spin_unlock(&bp->phy_lock);
2370 }
2371 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2372 bnx2_set_remote_link(bp);
2373
2374 }
2375
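/* The hardware consumer index counts every descriptor slot, including
 * the last slot of each ring page, which holds the pointer chaining to
 * the next page rather than a real buffer descriptor.  The check against
 * MAX_TX_DESC_CNT below presumably detects that chain slot and bumps the
 * index past it, keeping the software ring index aligned with real BDs.
 */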
2376 static inline u16
2377 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2378 {
2379 u16 cons;
2380
2381 if (bnapi->int_num == 0)
2382 cons = bnapi->status_blk->status_tx_quick_consumer_index0;
2383 else
2384 cons = bnapi->status_blk_msix->status_tx_quick_consumer_index;
2385
2386 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2387 cons++;
2388 return cons;
2389 }
2390
2391 static int
2392 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2393 {
2394 u16 hw_cons, sw_cons, sw_ring_cons;
2395 int tx_pkt = 0;
2396
2397 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2398 sw_cons = bnapi->tx_cons;
2399
2400 while (sw_cons != hw_cons) {
2401 struct sw_bd *tx_buf;
2402 struct sk_buff *skb;
2403 int i, last;
2404
2405 sw_ring_cons = TX_RING_IDX(sw_cons);
2406
2407 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
2408 skb = tx_buf->skb;
2409
2410 /* partial BD completions possible with TSO packets */
2411 if (skb_is_gso(skb)) {
2412 u16 last_idx, last_ring_idx;
2413
2414 last_idx = sw_cons +
2415 skb_shinfo(skb)->nr_frags + 1;
2416 last_ring_idx = sw_ring_cons +
2417 skb_shinfo(skb)->nr_frags + 1;
2418 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2419 last_idx++;
2420 }
2421 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2422 break;
2423 }
2424 }
2425
2426 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2427 skb_headlen(skb), PCI_DMA_TODEVICE);
2428
2429 tx_buf->skb = NULL;
2430 last = skb_shinfo(skb)->nr_frags;
2431
2432 for (i = 0; i < last; i++) {
2433 sw_cons = NEXT_TX_BD(sw_cons);
2434
2435 pci_unmap_page(bp->pdev,
2436 pci_unmap_addr(
2437 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2438 mapping),
2439 skb_shinfo(skb)->frags[i].size,
2440 PCI_DMA_TODEVICE);
2441 }
2442
2443 sw_cons = NEXT_TX_BD(sw_cons);
2444
2445 dev_kfree_skb(skb);
2446 tx_pkt++;
2447 if (tx_pkt == budget)
2448 break;
2449
2450 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2451 }
2452
2453 bnapi->hw_tx_cons = hw_cons;
2454 bnapi->tx_cons = sw_cons;
2455 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2456 * before checking for netif_queue_stopped(). Without the
2457 * memory barrier, there is a small possibility that bnx2_start_xmit()
2458 * will miss it and cause the queue to be stopped forever.
2459 */
2460 smp_mb();
2461
2462 if (unlikely(netif_queue_stopped(bp->dev)) &&
2463 (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)) {
2464 netif_tx_lock(bp->dev);
2465 if ((netif_queue_stopped(bp->dev)) &&
2466 (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh))
2467 netif_wake_queue(bp->dev);
2468 netif_tx_unlock(bp->dev);
2469 }
2470 return tx_pkt;
2471 }
2472
2473 static void
2474 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_napi *bnapi,
2475 struct sk_buff *skb, int count)
2476 {
2477 struct sw_pg *cons_rx_pg, *prod_rx_pg;
2478 struct rx_bd *cons_bd, *prod_bd;
2479 dma_addr_t mapping;
2480 int i;
2481 u16 hw_prod = bnapi->rx_pg_prod, prod;
2482 u16 cons = bnapi->rx_pg_cons;
2483
2484 for (i = 0; i < count; i++) {
2485 prod = RX_PG_RING_IDX(hw_prod);
2486
2487 prod_rx_pg = &bp->rx_pg_ring[prod];
2488 cons_rx_pg = &bp->rx_pg_ring[cons];
2489 cons_bd = &bp->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2490 prod_bd = &bp->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2491
2492 if (i == 0 && skb) {
2493 struct page *page;
2494 struct skb_shared_info *shinfo;
2495
2496 shinfo = skb_shinfo(skb);
2497 shinfo->nr_frags--;
2498 page = shinfo->frags[shinfo->nr_frags].page;
2499 shinfo->frags[shinfo->nr_frags].page = NULL;
2500 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2501 PCI_DMA_FROMDEVICE);
2502 cons_rx_pg->page = page;
2503 pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
2504 dev_kfree_skb(skb);
2505 }
2506 if (prod != cons) {
2507 prod_rx_pg->page = cons_rx_pg->page;
2508 cons_rx_pg->page = NULL;
2509 pci_unmap_addr_set(prod_rx_pg, mapping,
2510 pci_unmap_addr(cons_rx_pg, mapping));
2511
2512 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2513 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2514
2515 }
2516 cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2517 hw_prod = NEXT_RX_BD(hw_prod);
2518 }
2519 bnapi->rx_pg_prod = hw_prod;
2520 bnapi->rx_pg_cons = cons;
2521 }
2522
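/* Recycle an rx buffer instead of allocating a new one (used on receive
 * errors and when the packet was copied): the skb, its DMA mapping, and
 * the BD address are moved from the consumer slot to the producer slot,
 * so the same buffer is immediately reposted to the hardware with no
 * allocation and no remapping.
 */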
2523 static inline void
2524 bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
2525 u16 cons, u16 prod)
2526 {
2527 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2528 struct rx_bd *cons_bd, *prod_bd;
2529
2530 cons_rx_buf = &bp->rx_buf_ring[cons];
2531 prod_rx_buf = &bp->rx_buf_ring[prod];
2532
2533 pci_dma_sync_single_for_device(bp->pdev,
2534 pci_unmap_addr(cons_rx_buf, mapping),
2535 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2536
2537 bnapi->rx_prod_bseq += bp->rx_buf_use_size;
2538
2539 prod_rx_buf->skb = skb;
2540
2541 if (cons == prod)
2542 return;
2543
2544 pci_unmap_addr_set(prod_rx_buf, mapping,
2545 pci_unmap_addr(cons_rx_buf, mapping));
2546
2547 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2548 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2549 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2550 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2551 }
2552
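/* Build the final skb for a received frame.  For small frames (hdr_len
 * == 0) the linear buffer holds everything.  For split-header or jumbo
 * frames, the first hdr_len bytes stay in the linear buffer and the rest
 * is assembled from PAGE_SIZE chunks on the page ring as skb fragments.
 * The "+ 4" on the length appears to account for a 4-byte trailer (the
 * frame CRC left on by the chip) that is trimmed from the last fragment.
 */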
2553 static int
2554 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
2555 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2556 u32 ring_idx)
2557 {
2558 int err;
2559 u16 prod = ring_idx & 0xffff;
2560
2561 err = bnx2_alloc_rx_skb(bp, bnapi, prod);
2562 if (unlikely(err)) {
2563 bnx2_reuse_rx_skb(bp, bnapi, skb, (u16) (ring_idx >> 16), prod);
2564 if (hdr_len) {
2565 unsigned int raw_len = len + 4;
2566 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
2567
2568 bnx2_reuse_rx_skb_pages(bp, bnapi, NULL, pages);
2569 }
2570 return err;
2571 }
2572
2573 skb_reserve(skb, bp->rx_offset);
2574 pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
2575 PCI_DMA_FROMDEVICE);
2576
2577 if (hdr_len == 0) {
2578 skb_put(skb, len);
2579 return 0;
2580 } else {
2581 unsigned int i, frag_len, frag_size, pages;
2582 struct sw_pg *rx_pg;
2583 u16 pg_cons = bnapi->rx_pg_cons;
2584 u16 pg_prod = bnapi->rx_pg_prod;
2585
2586 frag_size = len + 4 - hdr_len;
2587 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
2588 skb_put(skb, hdr_len);
2589
2590 for (i = 0; i < pages; i++) {
2591 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
2592 if (unlikely(frag_len <= 4)) {
2593 unsigned int tail = 4 - frag_len;
2594
2595 bnapi->rx_pg_cons = pg_cons;
2596 bnapi->rx_pg_prod = pg_prod;
2597 bnx2_reuse_rx_skb_pages(bp, bnapi, NULL,
2598 pages - i);
2599 skb->len -= tail;
2600 if (i == 0) {
2601 skb->tail -= tail;
2602 } else {
2603 skb_frag_t *frag =
2604 &skb_shinfo(skb)->frags[i - 1];
2605 frag->size -= tail;
2606 skb->data_len -= tail;
2607 skb->truesize -= tail;
2608 }
2609 return 0;
2610 }
2611 rx_pg = &bp->rx_pg_ring[pg_cons];
2612
2613 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
2614 PAGE_SIZE, PCI_DMA_FROMDEVICE);
2615
2616 if (i == pages - 1)
2617 frag_len -= 4;
2618
2619 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
2620 rx_pg->page = NULL;
2621
2622 err = bnx2_alloc_rx_page(bp, RX_PG_RING_IDX(pg_prod));
2623 if (unlikely(err)) {
2624 bnapi->rx_pg_cons = pg_cons;
2625 bnapi->rx_pg_prod = pg_prod;
2626 bnx2_reuse_rx_skb_pages(bp, bnapi, skb,
2627 pages - i);
2628 return err;
2629 }
2630
2631 frag_size -= frag_len;
2632 skb->data_len += frag_len;
2633 skb->truesize += frag_len;
2634 skb->len += frag_len;
2635
2636 pg_prod = NEXT_RX_BD(pg_prod);
2637 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
2638 }
2639 bnapi->rx_pg_prod = pg_prod;
2640 bnapi->rx_pg_cons = pg_cons;
2641 }
2642 return 0;
2643 }
2644
2645 static inline u16
2646 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
2647 {
2648 u16 cons = bnapi->status_blk->status_rx_quick_consumer_index0;
2649
2650 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2651 cons++;
2652 return cons;
2653 }
2654
2655 static int
2656 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2657 {
2658 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2659 struct l2_fhdr *rx_hdr;
2660 int rx_pkt = 0, pg_ring_used = 0;
2661
2662 hw_cons = bnx2_get_hw_rx_cons(bnapi);
2663 sw_cons = bnapi->rx_cons;
2664 sw_prod = bnapi->rx_prod;
2665
2666 /* Memory barrier necessary as speculative reads of the rx
2667 * buffer can be ahead of the index in the status block
2668 */
2669 rmb();
2670 while (sw_cons != hw_cons) {
2671 unsigned int len, hdr_len;
2672 u32 status;
2673 struct sw_bd *rx_buf;
2674 struct sk_buff *skb;
2675 dma_addr_t dma_addr;
2676
2677 sw_ring_cons = RX_RING_IDX(sw_cons);
2678 sw_ring_prod = RX_RING_IDX(sw_prod);
2679
2680 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2681 skb = rx_buf->skb;
2682
2683 rx_buf->skb = NULL;
2684
2685 dma_addr = pci_unmap_addr(rx_buf, mapping);
2686
2687 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2688 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2689
2690 rx_hdr = (struct l2_fhdr *) skb->data;
2691 len = rx_hdr->l2_fhdr_pkt_len;
2692
2693 if ((status = rx_hdr->l2_fhdr_status) &
2694 (L2_FHDR_ERRORS_BAD_CRC |
2695 L2_FHDR_ERRORS_PHY_DECODE |
2696 L2_FHDR_ERRORS_ALIGNMENT |
2697 L2_FHDR_ERRORS_TOO_SHORT |
2698 L2_FHDR_ERRORS_GIANT_FRAME)) {
2699
2700 bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
2701 sw_ring_prod);
2702 goto next_rx;
2703 }
2704 hdr_len = 0;
2705 if (status & L2_FHDR_STATUS_SPLIT) {
2706 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
2707 pg_ring_used = 1;
2708 } else if (len > bp->rx_jumbo_thresh) {
2709 hdr_len = bp->rx_jumbo_thresh;
2710 pg_ring_used = 1;
2711 }
2712
2713 len -= 4;
2714
2715 if (len <= bp->rx_copy_thresh) {
2716 struct sk_buff *new_skb;
2717
2718 new_skb = netdev_alloc_skb(bp->dev, len + 2);
2719 if (new_skb == NULL) {
2720 bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
2721 sw_ring_prod);
2722 goto next_rx;
2723 }
2724
2725 /* aligned copy */
2726 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2727 new_skb->data, len + 2);
2728 skb_reserve(new_skb, 2);
2729 skb_put(new_skb, len);
2730
2731 bnx2_reuse_rx_skb(bp, bnapi, skb,
2732 sw_ring_cons, sw_ring_prod);
2733
2734 skb = new_skb;
2735 } else if (unlikely(bnx2_rx_skb(bp, bnapi, skb, len, hdr_len,
2736 dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
2737 goto next_rx;
2738
2739 skb->protocol = eth_type_trans(skb, bp->dev);
2740
2741 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2742 (ntohs(skb->protocol) != 0x8100)) {
2743
2744 dev_kfree_skb(skb);
2745 goto next_rx;
2746
2747 }
2748
2749 skb->ip_summed = CHECKSUM_NONE;
2750 if (bp->rx_csum &&
2751 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2752 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2753
2754 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2755 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2756 skb->ip_summed = CHECKSUM_UNNECESSARY;
2757 }
2758
2759 #ifdef BCM_VLAN
2760 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
2761 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2762 rx_hdr->l2_fhdr_vlan_tag);
2763 }
2764 else
2765 #endif
2766 netif_receive_skb(skb);
2767
2768 bp->dev->last_rx = jiffies;
2769 rx_pkt++;
2770
2771 next_rx:
2772 sw_cons = NEXT_RX_BD(sw_cons);
2773 sw_prod = NEXT_RX_BD(sw_prod);
2774
2775 if (rx_pkt == budget)
2776 break;
2777
2778 /* Refresh hw_cons to see if there is new work */
2779 if (sw_cons == hw_cons) {
2780 hw_cons = bnx2_get_hw_rx_cons(bnapi);
2781 rmb();
2782 }
2783 }
2784 bnapi->rx_cons = sw_cons;
2785 bnapi->rx_prod = sw_prod;
2786
2787 if (pg_ring_used)
2788 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
2789 bnapi->rx_pg_prod);
2790
2791 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2792
2793 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
2794
2795 mmiowb();
2796
2797 return rx_pkt;
2798
2799 }
2800
2801 /* MSI ISR - The only difference between this and the INTx ISR
2802 * is that the MSI interrupt is always serviced.
2803 */
2804 static irqreturn_t
2805 bnx2_msi(int irq, void *dev_instance)
2806 {
2807 struct net_device *dev = dev_instance;
2808 struct bnx2 *bp = netdev_priv(dev);
2809 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2810
2811 prefetch(bnapi->status_blk);
2812 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2813 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2814 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2815
2816 /* Return here if interrupt is disabled. */
2817 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2818 return IRQ_HANDLED;
2819
2820 netif_rx_schedule(dev, &bnapi->napi);
2821
2822 return IRQ_HANDLED;
2823 }
2824
2825 static irqreturn_t
2826 bnx2_msi_1shot(int irq, void *dev_instance)
2827 {
2828 struct net_device *dev = dev_instance;
2829 struct bnx2 *bp = netdev_priv(dev);
2830 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2831
2832 prefetch(bnapi->status_blk);
2833
2834 /* Return here if interrupt is disabled. */
2835 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2836 return IRQ_HANDLED;
2837
2838 netif_rx_schedule(dev, &bnapi->napi);
2839
2840 return IRQ_HANDLED;
2841 }
2842
2843 static irqreturn_t
2844 bnx2_interrupt(int irq, void *dev_instance)
2845 {
2846 struct net_device *dev = dev_instance;
2847 struct bnx2 *bp = netdev_priv(dev);
2848 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2849 struct status_block *sblk = bnapi->status_blk;
2850
2851 /* When using INTx, it is possible for the interrupt to arrive
2852 * at the CPU before the status block write posted prior to the
2853 * interrupt has reached memory. Reading a register will flush
2854 * the status block. When using MSI, the MSI message will always
2855 * complete after the status block write.
2856 */
2857 if ((sblk->status_idx == bnapi->last_status_idx) &&
2858 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2859 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2860 return IRQ_NONE;
2861
2862 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2863 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2864 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2865
2866 /* Read back to deassert IRQ immediately to avoid too many
2867 * spurious interrupts.
2868 */
2869 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
2870
2871 /* Return here if interrupt is shared and is disabled. */
2872 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2873 return IRQ_HANDLED;
2874
2875 if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
2876 bnapi->last_status_idx = sblk->status_idx;
2877 __netif_rx_schedule(dev, &bnapi->napi);
2878 }
2879
2880 return IRQ_HANDLED;
2881 }
2882
2883 static irqreturn_t
2884 bnx2_tx_msix(int irq, void *dev_instance)
2885 {
2886 struct net_device *dev = dev_instance;
2887 struct bnx2 *bp = netdev_priv(dev);
2888 struct bnx2_napi *bnapi = &bp->bnx2_napi[BNX2_TX_VEC];
2889
2890 prefetch(bnapi->status_blk_msix);
2891
2892 /* Return here if interrupt is disabled. */
2893 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2894 return IRQ_HANDLED;
2895
2896 netif_rx_schedule(dev, &bnapi->napi);
2897 return IRQ_HANDLED;
2898 }
2899
2900 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2901 STATUS_ATTN_BITS_TIMER_ABORT)
2902
2903 static inline int
2904 bnx2_has_work(struct bnx2_napi *bnapi)
2905 {
2906 struct bnx2 *bp = bnapi->bp;
2907 struct status_block *sblk = bp->status_blk;
2908
2909 if ((bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons) ||
2910 (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons))
2911 return 1;
2912
2913 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2914 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2915 return 1;
2916
2917 return 0;
2918 }
2919
2920 static int bnx2_tx_poll(struct napi_struct *napi, int budget)
2921 {
2922 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
2923 struct bnx2 *bp = bnapi->bp;
2924 int work_done = 0;
2925 struct status_block_msix *sblk = bnapi->status_blk_msix;
2926
2927 do {
2928 work_done += bnx2_tx_int(bp, bnapi, budget - work_done);
2929 if (unlikely(work_done >= budget))
2930 return work_done;
2931
2932 bnapi->last_status_idx = sblk->status_idx;
2933 rmb();
2934 } while (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons);
2935
2936 netif_rx_complete(bp->dev, napi);
2937 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
2938 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2939 bnapi->last_status_idx);
2940 return work_done;
2941 }
2942
2943 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
2944 int work_done, int budget)
2945 {
2946 struct status_block *sblk = bnapi->status_blk;
2947 u32 status_attn_bits = sblk->status_attn_bits;
2948 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
2949
2950 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
2951 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
2952
2953 bnx2_phy_int(bp, bnapi);
2954
2955 /* This is needed to take care of transient status
2956 * during link changes.
2957 */
2958 REG_WR(bp, BNX2_HC_COMMAND,
2959 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2960 REG_RD(bp, BNX2_HC_COMMAND);
2961 }
2962
2963 if (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons)
2964 bnx2_tx_int(bp, bnapi, 0);
2965
2966 if (bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons)
2967 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
2968
2969 return work_done;
2970 }
2971
2972 static int bnx2_poll(struct napi_struct *napi, int budget)
2973 {
2974 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
2975 struct bnx2 *bp = bnapi->bp;
2976 int work_done = 0;
2977 struct status_block *sblk = bnapi->status_blk;
2978
2979 while (1) {
2980 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
2981
2982 if (unlikely(work_done >= budget))
2983 break;
2984
2985 /* bnapi->last_status_idx is used below to tell the hw how
2986 * much work has been processed, so we must read it before
2987 * checking for more work.
2988 */
2989 bnapi->last_status_idx = sblk->status_idx;
2990 rmb();
2991 if (likely(!bnx2_has_work(bnapi))) {
2992 netif_rx_complete(bp->dev, napi);
2993 if (likely(bp->flags & USING_MSI_OR_MSIX_FLAG)) {
2994 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2995 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2996 bnapi->last_status_idx);
2997 break;
2998 }
2999 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3000 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3001 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3002 bnapi->last_status_idx);
3003
3004 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3005 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3006 bnapi->last_status_idx);
3007 break;
3008 }
3009 }
3010
3011 return work_done;
3012 }
3013
3014 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3015 * from set_multicast.
3016 */
3017 static void
3018 bnx2_set_rx_mode(struct net_device *dev)
3019 {
3020 struct bnx2 *bp = netdev_priv(dev);
3021 u32 rx_mode, sort_mode;
3022 int i;
3023
3024 spin_lock_bh(&bp->phy_lock);
3025
3026 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3027 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3028 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3029 #ifdef BCM_VLAN
3030 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
3031 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3032 #else
3033 if (!(bp->flags & ASF_ENABLE_FLAG))
3034 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3035 #endif
3036 if (dev->flags & IFF_PROMISC) {
3037 /* Promiscuous mode. */
3038 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3039 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3040 BNX2_RPM_SORT_USER0_PROM_VLAN;
3041 }
3042 else if (dev->flags & IFF_ALLMULTI) {
3043 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3044 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3045 0xffffffff);
3046 }
3047 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3048 }
3049 else {
3050 /* Accept one or more multicast(s). */
3051 struct dev_mc_list *mclist;
3052 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3053 u32 regidx;
3054 u32 bit;
3055 u32 crc;
3056
3057 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3058
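/* Hash each multicast address into one of 256 filter bits: the low
 * 8 bits of the little-endian CRC pick the bit, with bits 7:5
 * selecting one of the 8 hash registers and bits 4:0 the bit within
 * it.  Worked example with a hypothetical CRC whose low byte is
 * 0x6d: regidx = (0x6d & 0xe0) >> 5 = 3, bit = 0x6d & 0x1f = 13,
 * so mc_filter[3] |= (1 << 13).
 */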
3059 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
3060 i++, mclist = mclist->next) {
3061
3062 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
3063 bit = crc & 0xff;
3064 regidx = (bit & 0xe0) >> 5;
3065 bit &= 0x1f;
3066 mc_filter[regidx] |= (1 << bit);
3067 }
3068
3069 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3070 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3071 mc_filter[i]);
3072 }
3073
3074 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3075 }
3076
3077 if (rx_mode != bp->rx_mode) {
3078 bp->rx_mode = rx_mode;
3079 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3080 }
3081
3082 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3083 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3084 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3085
3086 spin_unlock_bh(&bp->phy_lock);
3087 }
3088
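/* Load microcode into one of the two RV2P (rx v2 processor) engines.
 * Each 64-bit instruction is written as a high/low register pair, and
 * i / 8 (the loop below advances 8 bytes per instruction) serves as the
 * target instruction address in the ADDR_CMD write.
 */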
3089 static void
3090 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
3091 u32 rv2p_proc)
3092 {
3093 int i;
3094 u32 val;
3095
3096
3097 for (i = 0; i < rv2p_code_len; i += 8) {
3098 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
3099 rv2p_code++;
3100 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
3101 rv2p_code++;
3102
3103 if (rv2p_proc == RV2P_PROC1) {
3104 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3105 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
3106 }
3107 else {
3108 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3109 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
3110 }
3111 }
3112
3113 /* Reset the processor; it is un-stalled later. */
3114 if (rv2p_proc == RV2P_PROC1) {
3115 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3116 }
3117 else {
3118 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3119 }
3120 }
3121
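/* Copy one firmware image into an on-chip CPU's scratchpad and start it.
 * The section addresses in struct fw_info are in the CPU's own (MIPS)
 * view of memory, so each one is rebased into the host's indirect-
 * register window:
 *
 *	host_offset = spad_base + (section_addr - mips_view_base);
 *
 * Text sections are stored zlib-compressed and inflated first; data and
 * rodata are copied as-is; sbss/bss are zero-filled.
 */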
3122 static int
3123 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
3124 {
3125 u32 offset;
3126 u32 val;
3127 int rc;
3128
3129 /* Halt the CPU. */
3130 val = REG_RD_IND(bp, cpu_reg->mode);
3131 val |= cpu_reg->mode_value_halt;
3132 REG_WR_IND(bp, cpu_reg->mode, val);
3133 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
3134
3135 /* Load the Text area. */
3136 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
3137 if (fw->gz_text) {
3138 int j;
3139
3140 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
3141 fw->gz_text_len);
3142 if (rc < 0)
3143 return rc;
3144
3145 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
3146 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
3147 }
3148 }
3149
3150 /* Load the Data area. */
3151 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3152 if (fw->data) {
3153 int j;
3154
3155 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3156 REG_WR_IND(bp, offset, fw->data[j]);
3157 }
3158 }
3159
3160 /* Load the SBSS area. */
3161 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
3162 if (fw->sbss_len) {
3163 int j;
3164
3165 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
3166 REG_WR_IND(bp, offset, 0);
3167 }
3168 }
3169
3170 /* Load the BSS area. */
3171 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
3172 if (fw->bss_len) {
3173 int j;
3174
3175 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
3176 REG_WR_IND(bp, offset, 0);
3177 }
3178 }
3179
3180 /* Load the Read-Only area. */
3181 offset = cpu_reg->spad_base +
3182 (fw->rodata_addr - cpu_reg->mips_view_base);
3183 if (fw->rodata) {
3184 int j;
3185
3186 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3187 REG_WR_IND(bp, offset, fw->rodata[j]);
3188 }
3189 }
3190
3191 /* Clear the pre-fetch instruction. */
3192 REG_WR_IND(bp, cpu_reg->inst, 0);
3193 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
3194
3195 /* Start the CPU. */
3196 val = REG_RD_IND(bp, cpu_reg->mode);
3197 val &= ~cpu_reg->mode_value_halt;
3198 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
3199 REG_WR_IND(bp, cpu_reg->mode, val);
3200
3201 return 0;
3202 }
3203
3204 static int
3205 bnx2_init_cpus(struct bnx2 *bp)
3206 {
3207 struct cpu_reg cpu_reg;
3208 struct fw_info *fw;
3209 int rc, rv2p_len;
3210 void *text, *rv2p;
3211
3212 /* Initialize the RV2P processor. */
3213 text = vmalloc(FW_BUF_SIZE);
3214 if (!text)
3215 return -ENOMEM;
3216 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3217 rv2p = bnx2_xi_rv2p_proc1;
3218 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
3219 } else {
3220 rv2p = bnx2_rv2p_proc1;
3221 rv2p_len = sizeof(bnx2_rv2p_proc1);
3222 }
3223 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3224 if (rc < 0)
3225 goto init_cpu_err;
3226
3227 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
3228
3229 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3230 rv2p = bnx2_xi_rv2p_proc2;
3231 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
3232 } else {
3233 rv2p = bnx2_rv2p_proc2;
3234 rv2p_len = sizeof(bnx2_rv2p_proc2);
3235 }
3236 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3237 if (rc < 0)
3238 goto init_cpu_err;
3239
3240 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
3241
3242 /* Initialize the RX Processor. */
3243 cpu_reg.mode = BNX2_RXP_CPU_MODE;
3244 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
3245 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
3246 cpu_reg.state = BNX2_RXP_CPU_STATE;
3247 cpu_reg.state_value_clear = 0xffffff;
3248 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
3249 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
3250 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
3251 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
3252 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
3253 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
3254 cpu_reg.mips_view_base = 0x8000000;
3255
3256 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3257 fw = &bnx2_rxp_fw_09;
3258 else
3259 fw = &bnx2_rxp_fw_06;
3260
3261 fw->text = text;
3262 rc = load_cpu_fw(bp, &cpu_reg, fw);
3263 if (rc)
3264 goto init_cpu_err;
3265
3266 /* Initialize the TX Processor. */
3267 cpu_reg.mode = BNX2_TXP_CPU_MODE;
3268 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
3269 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
3270 cpu_reg.state = BNX2_TXP_CPU_STATE;
3271 cpu_reg.state_value_clear = 0xffffff;
3272 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
3273 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
3274 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
3275 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
3276 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
3277 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
3278 cpu_reg.mips_view_base = 0x8000000;
3279
3280 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3281 fw = &bnx2_txp_fw_09;
3282 else
3283 fw = &bnx2_txp_fw_06;
3284
3285 fw->text = text;
3286 rc = load_cpu_fw(bp, &cpu_reg, fw);
3287 if (rc)
3288 goto init_cpu_err;
3289
3290 /* Initialize the TX Patch-up Processor. */
3291 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
3292 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
3293 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
3294 cpu_reg.state = BNX2_TPAT_CPU_STATE;
3295 cpu_reg.state_value_clear = 0xffffff;
3296 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
3297 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
3298 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
3299 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
3300 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
3301 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
3302 cpu_reg.mips_view_base = 0x8000000;
3303
3304 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3305 fw = &bnx2_tpat_fw_09;
3306 else
3307 fw = &bnx2_tpat_fw_06;
3308
3309 fw->text = text;
3310 rc = load_cpu_fw(bp, &cpu_reg, fw);
3311 if (rc)
3312 goto init_cpu_err;
3313
3314 /* Initialize the Completion Processor. */
3315 cpu_reg.mode = BNX2_COM_CPU_MODE;
3316 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
3317 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
3318 cpu_reg.state = BNX2_COM_CPU_STATE;
3319 cpu_reg.state_value_clear = 0xffffff;
3320 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
3321 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
3322 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
3323 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
3324 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
3325 cpu_reg.spad_base = BNX2_COM_SCRATCH;
3326 cpu_reg.mips_view_base = 0x8000000;
3327
3328 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3329 fw = &bnx2_com_fw_09;
3330 else
3331 fw = &bnx2_com_fw_06;
3332
3333 fw->text = text;
3334 rc = load_cpu_fw(bp, &cpu_reg, fw);
3335 if (rc)
3336 goto init_cpu_err;
3337
3338 /* Initialize the Command Processor. */
3339 cpu_reg.mode = BNX2_CP_CPU_MODE;
3340 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3341 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3342 cpu_reg.state = BNX2_CP_CPU_STATE;
3343 cpu_reg.state_value_clear = 0xffffff;
3344 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3345 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3346 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3347 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3348 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3349 cpu_reg.spad_base = BNX2_CP_SCRATCH;
3350 cpu_reg.mips_view_base = 0x8000000;
3351
3352 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3353 fw = &bnx2_cp_fw_09;
3354 else
3355 fw = &bnx2_cp_fw_06;
3356
3357 fw->text = text;
3358 rc = load_cpu_fw(bp, &cpu_reg, fw);
3359
3360 init_cpu_err:
3361 vfree(text);
3362 return rc;
3363 }
3364
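/* Move the chip between D0 and D3hot via the PCI PM control register.
 * For D3hot with WoL enabled, the MAC is first reconfigured to receive
 * magic/ACPI packets (temporarily forcing 10/100 autoneg on copper,
 * presumably so the link can come up against any partner while asleep),
 * then the firmware is told which suspend mode to use.  The "pmcsr |= 3"
 * writes below select the D3hot state encoding from the PCI PM spec.
 */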
3365 static int
3366 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3367 {
3368 u16 pmcsr;
3369
3370 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3371
3372 switch (state) {
3373 case PCI_D0: {
3374 u32 val;
3375
3376 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3377 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3378 PCI_PM_CTRL_PME_STATUS);
3379
3380 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3381 /* delay required during transition out of D3hot */
3382 msleep(20);
3383
3384 val = REG_RD(bp, BNX2_EMAC_MODE);
3385 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3386 val &= ~BNX2_EMAC_MODE_MPKT;
3387 REG_WR(bp, BNX2_EMAC_MODE, val);
3388
3389 val = REG_RD(bp, BNX2_RPM_CONFIG);
3390 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3391 REG_WR(bp, BNX2_RPM_CONFIG, val);
3392 break;
3393 }
3394 case PCI_D3hot: {
3395 int i;
3396 u32 val, wol_msg;
3397
3398 if (bp->wol) {
3399 u32 advertising;
3400 u8 autoneg;
3401
3402 autoneg = bp->autoneg;
3403 advertising = bp->advertising;
3404
3405 if (bp->phy_port == PORT_TP) {
3406 bp->autoneg = AUTONEG_SPEED;
3407 bp->advertising = ADVERTISED_10baseT_Half |
3408 ADVERTISED_10baseT_Full |
3409 ADVERTISED_100baseT_Half |
3410 ADVERTISED_100baseT_Full |
3411 ADVERTISED_Autoneg;
3412 }
3413
3414 spin_lock_bh(&bp->phy_lock);
3415 bnx2_setup_phy(bp, bp->phy_port);
3416 spin_unlock_bh(&bp->phy_lock);
3417
3418 bp->autoneg = autoneg;
3419 bp->advertising = advertising;
3420
3421 bnx2_set_mac_addr(bp);
3422
3423 val = REG_RD(bp, BNX2_EMAC_MODE);
3424
3425 /* Enable port mode. */
3426 val &= ~BNX2_EMAC_MODE_PORT;
3427 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3428 BNX2_EMAC_MODE_ACPI_RCVD |
3429 BNX2_EMAC_MODE_MPKT;
3430 if (bp->phy_port == PORT_TP)
3431 val |= BNX2_EMAC_MODE_PORT_MII;
3432 else {
3433 val |= BNX2_EMAC_MODE_PORT_GMII;
3434 if (bp->line_speed == SPEED_2500)
3435 val |= BNX2_EMAC_MODE_25G_MODE;
3436 }
3437
3438 REG_WR(bp, BNX2_EMAC_MODE, val);
3439
3440 /* receive all multicast */
3441 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3442 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3443 0xffffffff);
3444 }
3445 REG_WR(bp, BNX2_EMAC_RX_MODE,
3446 BNX2_EMAC_RX_MODE_SORT_MODE);
3447
3448 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3449 BNX2_RPM_SORT_USER0_MC_EN;
3450 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3451 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3452 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3453 BNX2_RPM_SORT_USER0_ENA);
3454
3455 /* Need to enable EMAC and RPM for WOL. */
3456 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3457 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3458 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3459 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3460
3461 val = REG_RD(bp, BNX2_RPM_CONFIG);
3462 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3463 REG_WR(bp, BNX2_RPM_CONFIG, val);
3464
3465 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3466 }
3467 else {
3468 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3469 }
3470
3471 if (!(bp->flags & NO_WOL_FLAG))
3472 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
3473
3474 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3475 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3476 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3477
3478 if (bp->wol)
3479 pmcsr |= 3;
3480 }
3481 else {
3482 pmcsr |= 3;
3483 }
3484 if (bp->wol) {
3485 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3486 }
3487 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3488 pmcsr);
3489
3490 /* No more memory access after this point until
3491 * device is brought back to D0.
3492 */
3493 udelay(50);
3494 break;
3495 }
3496 default:
3497 return -EINVAL;
3498 }
3499 return 0;
3500 }
3501
3502 static int
3503 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3504 {
3505 u32 val;
3506 int j;
3507
3508 /* Request access to the flash interface. */
3509 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3510 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3511 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3512 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3513 break;
3514
3515 udelay(5);
3516 }
3517
3518 if (j >= NVRAM_TIMEOUT_COUNT)
3519 return -EBUSY;
3520
3521 return 0;
3522 }
3523
3524 static int
3525 bnx2_release_nvram_lock(struct bnx2 *bp)
3526 {
3527 int j;
3528 u32 val;
3529
3530 /* Relinquish nvram interface. */
3531 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3532
3533 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3534 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3535 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3536 break;
3537
3538 udelay(5);
3539 }
3540
3541 if (j >= NVRAM_TIMEOUT_COUNT)
3542 return -EBUSY;
3543
3544 return 0;
3545 }
3546
3547
3548 static int
3549 bnx2_enable_nvram_write(struct bnx2 *bp)
3550 {
3551 u32 val;
3552
3553 val = REG_RD(bp, BNX2_MISC_CFG);
3554 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3555
3556 if (bp->flash_info->flags & BNX2_NV_WREN) {
3557 int j;
3558
3559 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3560 REG_WR(bp, BNX2_NVM_COMMAND,
3561 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3562
3563 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3564 udelay(5);
3565
3566 val = REG_RD(bp, BNX2_NVM_COMMAND);
3567 if (val & BNX2_NVM_COMMAND_DONE)
3568 break;
3569 }
3570
3571 if (j >= NVRAM_TIMEOUT_COUNT)
3572 return -EBUSY;
3573 }
3574 return 0;
3575 }
3576
3577 static void
3578 bnx2_disable_nvram_write(struct bnx2 *bp)
3579 {
3580 u32 val;
3581
3582 val = REG_RD(bp, BNX2_MISC_CFG);
3583 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3584 }
3585
3586
3587 static void
3588 bnx2_enable_nvram_access(struct bnx2 *bp)
3589 {
3590 u32 val;
3591
3592 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3593 /* Enable both bits, even on read. */
3594 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3595 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3596 }
3597
3598 static void
3599 bnx2_disable_nvram_access(struct bnx2 *bp)
3600 {
3601 u32 val;
3602
3603 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3604 /* Disable both bits, even after read. */
3605 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3606 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3607 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3608 }
3609
3610 static int
3611 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3612 {
3613 u32 cmd;
3614 int j;
3615
3616 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3617 /* Buffered flash, no erase needed */
3618 return 0;
3619
3620 /* Build an erase command */
3621 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3622 BNX2_NVM_COMMAND_DOIT;
3623
3624 /* Need to clear DONE bit separately. */
3625 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3626
3627 /* Address of the NVRAM page to erase. */
3628 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3629
3630 /* Issue an erase command. */
3631 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3632
3633 /* Wait for completion. */
3634 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3635 u32 val;
3636
3637 udelay(5);
3638
3639 val = REG_RD(bp, BNX2_NVM_COMMAND);
3640 if (val & BNX2_NVM_COMMAND_DONE)
3641 break;
3642 }
3643
3644 if (j >= NVRAM_TIMEOUT_COUNT)
3645 return -EBUSY;
3646
3647 return 0;
3648 }
3649
3650 static int
3651 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3652 {
3653 u32 cmd;
3654 int j;
3655
3656 /* Build the command word. */
3657 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3658
3659 /* Translate the offset for buffered flash; not needed on the 5709. */
3660 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3661 offset = ((offset / bp->flash_info->page_size) <<
3662 bp->flash_info->page_bits) +
3663 (offset % bp->flash_info->page_size);
3664 }
3665
3666 /* Need to clear DONE bit separately. */
3667 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3668
3669 /* Address of the NVRAM to read from. */
3670 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3671
3672 /* Issue a read command. */
3673 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3674
3675 /* Wait for completion. */
3676 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3677 u32 val;
3678
3679 udelay(5);
3680
3681 val = REG_RD(bp, BNX2_NVM_COMMAND);
3682 if (val & BNX2_NVM_COMMAND_DONE) {
3683 val = REG_RD(bp, BNX2_NVM_READ);
3684
3685 val = be32_to_cpu(val);
3686 memcpy(ret_val, &val, 4);
3687 break;
3688 }
3689 }
3690 if (j >= NVRAM_TIMEOUT_COUNT)
3691 return -EBUSY;
3692
3693 return 0;
3694 }
3695
3696
3697 static int
3698 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3699 {
3700 u32 cmd, val32;
3701 int j;
3702
3703 /* Build the command word. */
3704 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3705
3706 /* Translate the offset for buffered flash; not needed on the 5709. */
3707 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3708 offset = ((offset / bp->flash_info->page_size) <<
3709 bp->flash_info->page_bits) +
3710 (offset % bp->flash_info->page_size);
3711 }
3712
3713 /* Need to clear DONE bit separately. */
3714 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3715
3716 memcpy(&val32, val, 4);
3717 val32 = cpu_to_be32(val32);
3718
3719 /* Write the data. */
3720 REG_WR(bp, BNX2_NVM_WRITE, val32);
3721
3722 /* Address of the NVRAM to write to. */
3723 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3724
3725 /* Issue the write command. */
3726 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3727
3728 /* Wait for completion. */
3729 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3730 udelay(5);
3731
3732 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3733 break;
3734 }
3735 if (j >= NVRAM_TIMEOUT_COUNT)
3736 return -EBUSY;
3737
3738 return 0;
3739 }
3740
3741 static int
3742 bnx2_init_nvram(struct bnx2 *bp)
3743 {
3744 u32 val;
3745 int j, entry_count, rc = 0;
3746 struct flash_spec *flash;
3747
3748 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3749 bp->flash_info = &flash_5709;
3750 goto get_flash_size;
3751 }
3752
3753 /* Determine the selected interface. */
3754 val = REG_RD(bp, BNX2_NVM_CFG1);
3755
3756 entry_count = ARRAY_SIZE(flash_table);
3757
3758 if (val & 0x40000000) {
3759
3760 /* Flash interface has been reconfigured */
3761 for (j = 0, flash = &flash_table[0]; j < entry_count;
3762 j++, flash++) {
3763 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3764 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
3765 bp->flash_info = flash;
3766 break;
3767 }
3768 }
3769 }
3770 else {
3771 u32 mask;
3772 /* Flash interface has not been reconfigured yet */
3773
3774 if (val & (1 << 23))
3775 mask = FLASH_BACKUP_STRAP_MASK;
3776 else
3777 mask = FLASH_STRAP_MASK;
3778
3779 for (j = 0, flash = &flash_table[0]; j < entry_count;
3780 j++, flash++) {
3781
3782 if ((val & mask) == (flash->strapping & mask)) {
3783 bp->flash_info = flash;
3784
3785 /* Request access to the flash interface. */
3786 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3787 return rc;
3788
3789 /* Enable access to flash interface */
3790 bnx2_enable_nvram_access(bp);
3791
3792 /* Reconfigure the flash interface */
3793 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3794 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3795 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3796 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3797
3798 /* Disable access to flash interface */
3799 bnx2_disable_nvram_access(bp);
3800 bnx2_release_nvram_lock(bp);
3801
3802 break;
3803 }
3804 }
3805 } /* if (val & 0x40000000) */
3806
3807 if (j == entry_count) {
3808 bp->flash_info = NULL;
3809 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
3810 return -ENODEV;
3811 }
3812
3813 get_flash_size:
3814 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3815 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3816 if (val)
3817 bp->flash_size = val;
3818 else
3819 bp->flash_size = bp->flash_info->total_size;
3820
3821 return rc;
3822 }
3823
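/* Read an arbitrary byte range from NVRAM using 4-byte-aligned dword
 * accesses.  Unaligned edges are handled by reading whole dwords and
 * copying out only the wanted bytes.  Worked example (hypothetical
 * arguments): offset = 6, buf_size = 5.  The first read fetches the
 * dword at 4 and copies bytes 2..3 (pre_len = 2); len32 is then rounded
 * from 3 up to 4 with extra = 1, and the final read fetches the dword
 * at 8 and copies 3 bytes, for 5 bytes total.
 */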
3824 static int
3825 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3826 int buf_size)
3827 {
3828 int rc = 0;
3829 u32 cmd_flags, offset32, len32, extra;
3830
3831 if (buf_size == 0)
3832 return 0;
3833
3834 /* Request access to the flash interface. */
3835 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3836 return rc;
3837
3838 /* Enable access to flash interface */
3839 bnx2_enable_nvram_access(bp);
3840
3841 len32 = buf_size;
3842 offset32 = offset;
3843 extra = 0;
3844
3845 cmd_flags = 0;
3846
3847 if (offset32 & 3) {
3848 u8 buf[4];
3849 u32 pre_len;
3850
3851 offset32 &= ~3;
3852 pre_len = 4 - (offset & 3);
3853
3854 if (pre_len >= len32) {
3855 pre_len = len32;
3856 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3857 BNX2_NVM_COMMAND_LAST;
3858 }
3859 else {
3860 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3861 }
3862
3863 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3864
3865 if (rc)
3866 return rc;
3867
3868 memcpy(ret_buf, buf + (offset & 3), pre_len);
3869
3870 offset32 += 4;
3871 ret_buf += pre_len;
3872 len32 -= pre_len;
3873 }
3874 if (len32 & 3) {
3875 extra = 4 - (len32 & 3);
3876 len32 = (len32 + 4) & ~3;
3877 }
3878
3879 if (len32 == 4) {
3880 u8 buf[4];
3881
3882 if (cmd_flags)
3883 cmd_flags = BNX2_NVM_COMMAND_LAST;
3884 else
3885 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3886 BNX2_NVM_COMMAND_LAST;
3887
3888 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3889
3890 memcpy(ret_buf, buf, 4 - extra);
3891 }
3892 else if (len32 > 0) {
3893 u8 buf[4];
3894
3895 /* Read the first word. */
3896 if (cmd_flags)
3897 cmd_flags = 0;
3898 else
3899 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3900
3901 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3902
3903 /* Advance to the next dword. */
3904 offset32 += 4;
3905 ret_buf += 4;
3906 len32 -= 4;
3907
3908 while (len32 > 4 && rc == 0) {
3909 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3910
3911 /* Advance to the next dword. */
3912 offset32 += 4;
3913 ret_buf += 4;
3914 len32 -= 4;
3915 }
3916
3917 if (rc)
3918 return rc;
3919
3920 cmd_flags = BNX2_NVM_COMMAND_LAST;
3921 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3922
3923 memcpy(ret_buf, buf, 4 - extra);
3924 }
3925
3926 /* Disable access to flash interface */
3927 bnx2_disable_nvram_access(bp);
3928
3929 bnx2_release_nvram_lock(bp);
3930
3931 return rc;
3932 }
3933
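/* Write an arbitrary byte range to NVRAM.  Unaligned head and tail bytes
 * are handled read-modify-write: the surrounding dwords are read first
 * and merged with the caller's data in align_buf.  For non-buffered
 * flash, each affected page is additionally read in full, erased, and
 * rewritten, with the caller's bytes spliced in between the preserved
 * page_start..data_start and data_end..page_end regions.
 */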
3934 static int
3935 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3936 int buf_size)
3937 {
3938 u32 written, offset32, len32;
3939 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3940 int rc = 0;
3941 int align_start, align_end;
3942
3943 buf = data_buf;
3944 offset32 = offset;
3945 len32 = buf_size;
3946 align_start = align_end = 0;
3947
3948 if ((align_start = (offset32 & 3))) {
3949 offset32 &= ~3;
3950 len32 += align_start;
3951 if (len32 < 4)
3952 len32 = 4;
3953 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3954 return rc;
3955 }
3956
3957 if (len32 & 3) {
3958 align_end = 4 - (len32 & 3);
3959 len32 += align_end;
3960 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3961 return rc;
3962 }
3963
3964 if (align_start || align_end) {
3965 align_buf = kmalloc(len32, GFP_KERNEL);
3966 if (align_buf == NULL)
3967 return -ENOMEM;
3968 if (align_start) {
3969 memcpy(align_buf, start, 4);
3970 }
3971 if (align_end) {
3972 memcpy(align_buf + len32 - 4, end, 4);
3973 }
3974 memcpy(align_buf + align_start, data_buf, buf_size);
3975 buf = align_buf;
3976 }
3977
3978 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3979 flash_buffer = kmalloc(264, GFP_KERNEL);
3980 if (flash_buffer == NULL) {
3981 rc = -ENOMEM;
3982 goto nvram_write_end;
3983 }
3984 }
3985
3986 written = 0;
3987 while ((written < len32) && (rc == 0)) {
3988 u32 page_start, page_end, data_start, data_end;
3989 u32 addr, cmd_flags;
3990 int i;
3991
3992 /* Find the page_start addr */
3993 page_start = offset32 + written;
3994 page_start -= (page_start % bp->flash_info->page_size);
3995 /* Find the page_end addr */
3996 page_end = page_start + bp->flash_info->page_size;
3997 /* Find the data_start addr */
3998 data_start = (written == 0) ? offset32 : page_start;
3999 /* Find the data_end addr */
4000 data_end = (page_end > offset32 + len32) ?
4001 (offset32 + len32) : page_end;
4002
4003 /* Request access to the flash interface. */
4004 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4005 goto nvram_write_end;
4006
4007 /* Enable access to flash interface */
4008 bnx2_enable_nvram_access(bp);
4009
4010 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4011 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4012 int j;
4013
4014 /* Read the whole page into the buffer
4015 * (non-buffered flash only) */
4016 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4017 if (j == (bp->flash_info->page_size - 4)) {
4018 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4019 }
4020 rc = bnx2_nvram_read_dword(bp,
4021 page_start + j,
4022 &flash_buffer[j],
4023 cmd_flags);
4024
4025 if (rc)
4026 goto nvram_write_end;
4027
4028 cmd_flags = 0;
4029 }
4030 }
4031
4032 /* Enable writes to flash interface (unlock write-protect) */
4033 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4034 goto nvram_write_end;
4035
4036 /* Loop to write back the buffer data from page_start to
4037 * data_start */
4038 i = 0;
4039 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4040 /* Erase the page */
4041 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4042 goto nvram_write_end;
4043
4044 /* Re-enable the write again for the actual write */
4045 bnx2_enable_nvram_write(bp);
4046
4047 for (addr = page_start; addr < data_start;
4048 addr += 4, i += 4) {
4049
4050 rc = bnx2_nvram_write_dword(bp, addr,
4051 &flash_buffer[i], cmd_flags);
4052
4053 if (rc != 0)
4054 goto nvram_write_end;
4055
4056 cmd_flags = 0;
4057 }
4058 }
4059
4060 /* Loop to write the new data from data_start to data_end */
4061 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4062 if ((addr == page_end - 4) ||
4063 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4064 (addr == data_end - 4))) {
4065
4066 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4067 }
4068 rc = bnx2_nvram_write_dword(bp, addr, buf,
4069 cmd_flags);
4070
4071 if (rc != 0)
4072 goto nvram_write_end;
4073
4074 cmd_flags = 0;
4075 buf += 4;
4076 }
4077
4078 /* Loop to write back the buffer data from data_end
4079 * to page_end */
4080 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4081 for (addr = data_end; addr < page_end;
4082 addr += 4, i += 4) {
4083
4084 if (addr == page_end-4) {
4085 cmd_flags = BNX2_NVM_COMMAND_LAST;
4086 }
4087 rc = bnx2_nvram_write_dword(bp, addr,
4088 &flash_buffer[i], cmd_flags);
4089
4090 if (rc != 0)
4091 goto nvram_write_end;
4092
4093 cmd_flags = 0;
4094 }
4095 }
4096
4097 /* Disable writes to flash interface (lock write-protect) */
4098 bnx2_disable_nvram_write(bp);
4099
4100 /* Disable access to flash interface */
4101 bnx2_disable_nvram_access(bp);
4102 bnx2_release_nvram_lock(bp);
4103
4104 /* Advance written past the bytes programmed in this page */
4105 written += data_end - data_start;
4106 }
4107
4108 nvram_write_end:
4109 kfree(flash_buffer);
4110 kfree(align_buf);
4111 return rc;
4112 }
4113
4114 static void
4115 bnx2_init_remote_phy(struct bnx2 *bp)
4116 {
4117 u32 val;
4118
4119 bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
4120 if (!(bp->phy_flags & PHY_SERDES_FLAG))
4121 return;
4122
4123 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
4124 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4125 return;
4126
4127 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
4128 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
4129
4130 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
4131 if (val & BNX2_LINK_STATUS_SERDES_LINK)
4132 bp->phy_port = PORT_FIBRE;
4133 else
4134 bp->phy_port = PORT_TP;
4135
4136 if (netif_running(bp->dev)) {
4137 u32 sig;
4138
4139 if (val & BNX2_LINK_STATUS_LINK_UP) {
4140 bp->link_up = 1;
4141 netif_carrier_on(bp->dev);
4142 } else {
4143 bp->link_up = 0;
4144 netif_carrier_off(bp->dev);
4145 }
4146 sig = BNX2_DRV_ACK_CAP_SIGNATURE |
4147 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4148 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
4149 sig);
4150 }
4151 }
4152 }
4153
4154 static void
4155 bnx2_setup_msix_tbl(struct bnx2 *bp)
4156 {
4157 REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4158
4159 REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4160 REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4161 }
4162
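/* Reset sequence: quiesce DMA and host coalescing, hand control to the
 * firmware, issue the core reset (via BNX2_MISC_COMMAND on the 5709,
 * via BNX2_PCICFG_MISC_CONFIG on older chips), then wait for the
 * firmware to finish re-initializing before touching the device again.
 */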
4163 static int
4164 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4165 {
4166 u32 val;
4167 int i, rc = 0;
4168 u8 old_port;
4169
4170 /* Wait for the current PCI transaction to complete before
4171 * issuing a reset. */
4172 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4173 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4174 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4175 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4176 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4177 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4178 udelay(5);
4179
4180 /* Wait for the firmware to tell us it is ok to issue a reset. */
4181 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
4182
4183 /* Deposit a driver reset signature so the firmware knows that
4184 * this is a soft reset. */
4185 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
4186 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4187
4188 /* Do a dummy read to force the chip to complete all current transactions
4189 * before we issue a reset. */
4190 val = REG_RD(bp, BNX2_MISC_ID);
4191
4192 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4193 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4194 REG_RD(bp, BNX2_MISC_COMMAND);
4195 udelay(5);
4196
4197 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4198 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4199
4200 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
4201
4202 } else {
4203 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4204 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4205 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4206
4207 /* Chip reset. */
4208 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4209
4210 /* Reading back any register after chip reset will hang the
4211 * bus on 5706 A0 and A1. The msleep below provides plenty
4212 * of margin for write posting.
4213 */
4214 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4215 (CHIP_ID(bp) == CHIP_ID_5706_A1))
4216 msleep(20);
4217
4218 /* Reset takes approximately 30 usec */
4219 for (i = 0; i < 10; i++) {
4220 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4221 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4222 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4223 break;
4224 udelay(10);
4225 }
4226
4227 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4228 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4229 printk(KERN_ERR PFX "Chip reset did not complete\n");
4230 return -EBUSY;
4231 }
4232 }
4233
4234 /* Make sure byte swapping is properly configured. */
4235 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4236 if (val != 0x01020304) {
4237 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
4238 return -ENODEV;
4239 }
4240
4241 /* Wait for the firmware to finish its initialization. */
4242 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
4243 if (rc)
4244 return rc;
4245
4246 spin_lock_bh(&bp->phy_lock);
4247 old_port = bp->phy_port;
4248 bnx2_init_remote_phy(bp);
4249 if ((bp->phy_flags & REMOTE_PHY_CAP_FLAG) && old_port != bp->phy_port)
4250 bnx2_set_default_remote_link(bp);
4251 spin_unlock_bh(&bp->phy_lock);
4252
4253 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4254 /* Adjust the voltage regulator two steps lower. The default
4255 * of this register is 0x0000000e. */
4256 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4257
4258 /* Remove bad rbuf memory from the free pool. */
4259 rc = bnx2_alloc_bad_rbuf(bp);
4260 }
4261
4262 if (bp->flags & USING_MSIX_FLAG)
4263 bnx2_setup_msix_tbl(bp);
4264
4265 return rc;
4266 }
4267
4268 static int
4269 bnx2_init_chip(struct bnx2 *bp)
4270 {
4271 u32 val;
4272 int rc, i;
4273
4274 /* Make sure the interrupt is not active. */
4275 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4276
4277 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4278 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4279 #ifdef __BIG_ENDIAN
4280 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4281 #endif
4282 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4283 DMA_READ_CHANS << 12 |
4284 DMA_WRITE_CHANS << 16;
4285
4286 val |= (0x2 << 20) | (1 << 11);
4287
4288 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
4289 val |= (1 << 23);
4290
4291 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4292 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
4293 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4294
4295 REG_WR(bp, BNX2_DMA_CONFIG, val);
4296
4297 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4298 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4299 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4300 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4301 }
4302
4303 if (bp->flags & PCIX_FLAG) {
4304 u16 val16;
4305
4306 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4307 &val16);
4308 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4309 val16 & ~PCI_X_CMD_ERO);
4310 }
4311
4312 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4313 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4314 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4315 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4316
4317 /* Initialize context mapping and zero out the quick contexts. The
4318 * context block must have already been enabled. */
4319 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4320 rc = bnx2_init_5709_context(bp);
4321 if (rc)
4322 return rc;
4323 } else
4324 bnx2_init_context(bp);
4325
4326 if ((rc = bnx2_init_cpus(bp)) != 0)
4327 return rc;
4328
4329 bnx2_init_nvram(bp);
4330
4331 bnx2_set_mac_addr(bp);
4332
4333 val = REG_RD(bp, BNX2_MQ_CONFIG);
4334 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4335 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4336 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4337 val |= BNX2_MQ_CONFIG_HALT_DIS;
4338
4339 REG_WR(bp, BNX2_MQ_CONFIG, val);
4340
4341 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4342 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4343 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4344
4345 val = (BCM_PAGE_BITS - 8) << 24;
4346 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4347
4348 /* Configure page size. */
4349 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4350 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4351 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4352 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4353
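/* Seed the EMAC backoff logic from the MAC address, presumably so
 * that NICs sharing a segment pick different collision backoff
 * intervals.
 */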
4354 val = bp->mac_addr[0] +
4355 (bp->mac_addr[1] << 8) +
4356 (bp->mac_addr[2] << 16) +
4357 bp->mac_addr[3] +
4358 (bp->mac_addr[4] << 8) +
4359 (bp->mac_addr[5] << 16);
4360 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4361
4362 /* Program the MTU. Also include 4 bytes for CRC32. */
4363 val = bp->dev->mtu + ETH_HLEN + 4;
4364 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4365 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4366 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4367
4368 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4369 bp->bnx2_napi[i].last_status_idx = 0;
4370
4371 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4372
4373 /* Set up how to generate a link change interrupt. */
4374 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4375
4376 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4377 (u64) bp->status_blk_mapping & 0xffffffff);
4378 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4379
4380 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4381 (u64) bp->stats_blk_mapping & 0xffffffff);
4382 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4383 (u64) bp->stats_blk_mapping >> 32);
4384
4385 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4386 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4387
4388 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4389 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4390
4391 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4392 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4393
4394 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4395
4396 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4397
4398 REG_WR(bp, BNX2_HC_COM_TICKS,
4399 (bp->com_ticks_int << 16) | bp->com_ticks);
4400
4401 REG_WR(bp, BNX2_HC_CMD_TICKS,
4402 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4403
4404 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4405 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4406 else
4407 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4408 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4409
4410 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4411 val = BNX2_HC_CONFIG_COLLECT_STATS;
4412 else {
4413 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4414 BNX2_HC_CONFIG_COLLECT_STATS;
4415 }
4416
4417 if (bp->flags & USING_MSIX_FLAG) {
4418 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4419 BNX2_HC_MSIX_BIT_VECTOR_VAL);
4420
4421 REG_WR(bp, BNX2_HC_SB_CONFIG_1,
4422 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4423 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4424
4425 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP_1,
4426 (bp->tx_quick_cons_trip_int << 16) |
4427 bp->tx_quick_cons_trip);
4428
4429 REG_WR(bp, BNX2_HC_TX_TICKS_1,
4430 (bp->tx_ticks_int << 16) | bp->tx_ticks);
4431
4432 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4433 }
4434
4435 if (bp->flags & ONE_SHOT_MSI_FLAG)
4436 val |= BNX2_HC_CONFIG_ONE_SHOT;
4437
4438 REG_WR(bp, BNX2_HC_CONFIG, val);
4439
4440 /* Clear internal stats counters. */
4441 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4442
4443 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4444
4445 /* Initialize the receive filter. */
4446 bnx2_set_rx_mode(bp->dev);
4447
4448 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4449 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4450 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4451 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4452 }
4453 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4454 0);
4455
4456 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4457 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4458
4459 udelay(20);
4460
4461 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4462
4463 return rc;
4464 }
4465
4466 static void
4467 bnx2_clear_ring_states(struct bnx2 *bp)
4468 {
4469 struct bnx2_napi *bnapi;
4470 int i;
4471
4472 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4473 bnapi = &bp->bnx2_napi[i];
4474
4475 bnapi->tx_cons = 0;
4476 bnapi->hw_tx_cons = 0;
4477 bnapi->rx_prod_bseq = 0;
4478 bnapi->rx_prod = 0;
4479 bnapi->rx_cons = 0;
4480 bnapi->rx_pg_prod = 0;
4481 bnapi->rx_pg_cons = 0;
4482 }
4483 }
4484
4485 static void
4486 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4487 {
4488 u32 val, offset0, offset1, offset2, offset3;
4489
4490 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4491 offset0 = BNX2_L2CTX_TYPE_XI;
4492 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4493 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4494 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4495 } else {
4496 offset0 = BNX2_L2CTX_TYPE;
4497 offset1 = BNX2_L2CTX_CMD_TYPE;
4498 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4499 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4500 }
4501 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4502 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4503
4504 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4505 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4506
4507 val = (u64) bp->tx_desc_mapping >> 32;
4508 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4509
4510 val = (u64) bp->tx_desc_mapping & 0xffffffff;
4511 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4512 }
4513
4514 static void
4515 bnx2_init_tx_ring(struct bnx2 *bp)
4516 {
4517 struct tx_bd *txbd;
4518 u32 cid = TX_CID;
4519 struct bnx2_napi *bnapi;
4520
4521 bp->tx_vec = 0;
4522 if (bp->flags & USING_MSIX_FLAG) {
4523 cid = TX_TSS_CID;
4524 bp->tx_vec = BNX2_TX_VEC;
4525 REG_WR(bp, BNX2_TSCH_TSS_CFG, BNX2_TX_INT_NUM |
4526 (TX_TSS_CID << 7));
4527 }
4528 bnapi = &bp->bnx2_napi[bp->tx_vec];
4529
4530 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4531
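/* The slot past the last usable descriptor is a chain BD pointing
 * back to the start of the ring, which makes the page circular.
 */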
4532 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
4533
4534 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4535 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4536
4537 bp->tx_prod = 0;
4538 bp->tx_prod_bseq = 0;
4539
4540 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4541 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4542
4543 bnx2_init_tx_context(bp, cid);
4544 }
4545
4546 static void
4547 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4548 int num_rings)
4549 {
4550 int i;
4551 struct rx_bd *rxbd;
4552
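/* Fill in every BD of each ring page, then turn the final BD of the
 * page into a chain pointer to the next page; the last page chains
 * back to the first, closing the ring.
 */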
4553 for (i = 0; i < num_rings; i++) {
4554 int j;
4555
4556 rxbd = &rx_ring[i][0];
4557 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4558 rxbd->rx_bd_len = buf_size;
4559 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4560 }
4561 if (i == (num_rings - 1))
4562 j = 0;
4563 else
4564 j = i + 1;
4565 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4566 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4567 }
4568 }
4569
4570 static void
4571 bnx2_init_rx_ring(struct bnx2 *bp)
4572 {
4573 int i;
4574 u16 prod, ring_prod;
4575 u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
4576 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
4577
4578 bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping,
4579 bp->rx_buf_use_size, bp->rx_max_ring);
4580
4581 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
4582 if (bp->rx_pg_ring_size) {
4583 bnx2_init_rxbd_rings(bp->rx_pg_desc_ring,
4584 bp->rx_pg_desc_mapping,
4585 PAGE_SIZE, bp->rx_max_pg_ring);
4586 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
4587 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
4588 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
4589 BNX2_L2CTX_RBDC_JUMBO_KEY);
4590
4591 val = (u64) bp->rx_pg_desc_mapping[0] >> 32;
4592 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
4593
4594 val = (u64) bp->rx_pg_desc_mapping[0] & 0xffffffff;
4595 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
4596
4597 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4598 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
4599 }
4600
4601 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
4602 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
4603 val |= 0x02 << 8;
4604 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
4605
4606 val = (u64) bp->rx_desc_mapping[0] >> 32;
4607 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4608
4609 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
4610 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4611
4612 ring_prod = prod = bnapi->rx_pg_prod;
4613 for (i = 0; i < bp->rx_pg_ring_size; i++) {
4614 if (bnx2_alloc_rx_page(bp, ring_prod) < 0)
4615 break;
4616 prod = NEXT_RX_BD(prod);
4617 ring_prod = RX_PG_RING_IDX(prod);
4618 }
4619 bnapi->rx_pg_prod = prod;
4620
4621 ring_prod = prod = bnapi->rx_prod;
4622 for (i = 0; i < bp->rx_ring_size; i++) {
4623 if (bnx2_alloc_rx_skb(bp, bnapi, ring_prod) < 0) {
4624 break;
4625 }
4626 prod = NEXT_RX_BD(prod);
4627 ring_prod = RX_RING_IDX(prod);
4628 }
4629 bnapi->rx_prod = prod;
4630
4631 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
4632 bnapi->rx_pg_prod);
4633 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
4634
4635 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
4636 }
4637
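/* Convert ring_size (in descriptors) to a number of ring pages,
 * rounded up to a power of two, e.g. a size spanning three pages
 * yields four.  max_size is assumed to be a power-of-two bound that
 * callers never exceed.
 */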
4638 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4639 {
4640 u32 max, num_rings = 1;
4641
4642 while (ring_size > MAX_RX_DESC_CNT) {
4643 ring_size -= MAX_RX_DESC_CNT;
4644 num_rings++;
4645 }
4646 /* round to next power of 2 */
4647 max = max_size;
4648 while ((max & num_rings) == 0)
4649 max >>= 1;
4650
4651 if (num_rings != max)
4652 max <<= 1;
4653
4654 return max;
4655 }
4656
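/* Size the RX buffers for the current MTU.  When a full frame plus
 * overhead no longer fits in a single page allocation, switch to
 * split reception: small skbs hold the headers up to the jumbo
 * threshold and a separate page ring holds the rest of each frame.
 */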
4657 static void
4658 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4659 {
4660 u32 rx_size, rx_space, jumbo_size;
4661
4662 /* 8 for CRC and VLAN */
4663 rx_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
4664
4665 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
4666 sizeof(struct skb_shared_info);
4667
4668 bp->rx_copy_thresh = RX_COPY_THRESH;
4669 bp->rx_pg_ring_size = 0;
4670 bp->rx_max_pg_ring = 0;
4671 bp->rx_max_pg_ring_idx = 0;
4672 if (rx_space > PAGE_SIZE) {
4673 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4674
4675 jumbo_size = size * pages;
4676 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
4677 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
4678
4679 bp->rx_pg_ring_size = jumbo_size;
4680 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
4681 MAX_RX_PG_RINGS);
4682 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
4683 rx_size = RX_COPY_THRESH + bp->rx_offset;
4684 bp->rx_copy_thresh = 0;
4685 }
4686
4687 bp->rx_buf_use_size = rx_size;
4688 /* hw alignment */
4689 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
4690 bp->rx_jumbo_thresh = rx_size - bp->rx_offset;
4691 bp->rx_ring_size = size;
4692 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
4693 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4694 }
4695
4696 static void
4697 bnx2_free_tx_skbs(struct bnx2 *bp)
4698 {
4699 int i;
4700
4701 if (bp->tx_buf_ring == NULL)
4702 return;
4703
4704 for (i = 0; i < TX_DESC_CNT; ) {
4705 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
4706 struct sk_buff *skb = tx_buf->skb;
4707 int j, last;
4708
4709 if (skb == NULL) {
4710 i++;
4711 continue;
4712 }
4713
4714 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
4715 skb_headlen(skb), PCI_DMA_TODEVICE);
4716
4717 tx_buf->skb = NULL;
4718
4719 last = skb_shinfo(skb)->nr_frags;
4720 for (j = 0; j < last; j++) {
4721 tx_buf = &bp->tx_buf_ring[i + j + 1];
4722 pci_unmap_page(bp->pdev,
4723 pci_unmap_addr(tx_buf, mapping),
4724 skb_shinfo(skb)->frags[j].size,
4725 PCI_DMA_TODEVICE);
4726 }
4727 dev_kfree_skb(skb);
4728 i += j + 1;
4729 }
4730
4731 }
4732
4733 static void
4734 bnx2_free_rx_skbs(struct bnx2 *bp)
4735 {
4736 int i;
4737
4738 if (bp->rx_buf_ring == NULL)
4739 return;
4740
4741 for (i = 0; i < bp->rx_max_ring_idx; i++) {
4742 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4743 struct sk_buff *skb = rx_buf->skb;
4744
4745 if (skb == NULL)
4746 continue;
4747
4748 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4749 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4750
4751 rx_buf->skb = NULL;
4752
4753 dev_kfree_skb(skb);
4754 }
4755 for (i = 0; i < bp->rx_max_pg_ring_idx; i++)
4756 bnx2_free_rx_page(bp, i);
4757 }
4758
4759 static void
4760 bnx2_free_skbs(struct bnx2 *bp)
4761 {
4762 bnx2_free_tx_skbs(bp);
4763 bnx2_free_rx_skbs(bp);
4764 }
4765
4766 static int
4767 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4768 {
4769 int rc;
4770
4771 rc = bnx2_reset_chip(bp, reset_code);
4772 bnx2_free_skbs(bp);
4773 if (rc)
4774 return rc;
4775
4776 if ((rc = bnx2_init_chip(bp)) != 0)
4777 return rc;
4778
4779 bnx2_clear_ring_states(bp);
4780 bnx2_init_tx_ring(bp);
4781 bnx2_init_rx_ring(bp);
4782 return 0;
4783 }
4784
4785 static int
4786 bnx2_init_nic(struct bnx2 *bp)
4787 {
4788 int rc;
4789
4790 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4791 return rc;
4792
4793 spin_lock_bh(&bp->phy_lock);
4794 bnx2_init_phy(bp);
4795 bnx2_set_link(bp);
4796 spin_unlock_bh(&bp->phy_lock);
4797 return 0;
4798 }
4799
4800 static int
4801 bnx2_test_registers(struct bnx2 *bp)
4802 {
4803 int ret;
4804 int i, is_5709;
4805 static const struct {
4806 u16 offset;
4807 u16 flags;
4808 #define BNX2_FL_NOT_5709 1
4809 u32 rw_mask;
4810 u32 ro_mask;
4811 } reg_tbl[] = {
4812 { 0x006c, 0, 0x00000000, 0x0000003f },
4813 { 0x0090, 0, 0xffffffff, 0x00000000 },
4814 { 0x0094, 0, 0x00000000, 0x00000000 },
4815
4816 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4817 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4818 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4819 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4820 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4821 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4822 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4823 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4824 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4825
4826 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4827 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4828 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4829 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4830 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4831 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4832
4833 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4834 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4835 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
4836
4837 { 0x1000, 0, 0x00000000, 0x00000001 },
4838 { 0x1004, 0, 0x00000000, 0x000f0001 },
4839
4840 { 0x1408, 0, 0x01c00800, 0x00000000 },
4841 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4842 { 0x14a8, 0, 0x00000000, 0x000001ff },
4843 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
4844 { 0x14b0, 0, 0x00000002, 0x00000001 },
4845 { 0x14b8, 0, 0x00000000, 0x00000000 },
4846 { 0x14c0, 0, 0x00000000, 0x00000009 },
4847 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4848 { 0x14cc, 0, 0x00000000, 0x00000001 },
4849 { 0x14d0, 0, 0xffffffff, 0x00000000 },
4850
4851 { 0x1800, 0, 0x00000000, 0x00000001 },
4852 { 0x1804, 0, 0x00000000, 0x00000003 },
4853
4854 { 0x2800, 0, 0x00000000, 0x00000001 },
4855 { 0x2804, 0, 0x00000000, 0x00003f01 },
4856 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4857 { 0x2810, 0, 0xffff0000, 0x00000000 },
4858 { 0x2814, 0, 0xffff0000, 0x00000000 },
4859 { 0x2818, 0, 0xffff0000, 0x00000000 },
4860 { 0x281c, 0, 0xffff0000, 0x00000000 },
4861 { 0x2834, 0, 0xffffffff, 0x00000000 },
4862 { 0x2840, 0, 0x00000000, 0xffffffff },
4863 { 0x2844, 0, 0x00000000, 0xffffffff },
4864 { 0x2848, 0, 0xffffffff, 0x00000000 },
4865 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4866
4867 { 0x2c00, 0, 0x00000000, 0x00000011 },
4868 { 0x2c04, 0, 0x00000000, 0x00030007 },
4869
4870 { 0x3c00, 0, 0x00000000, 0x00000001 },
4871 { 0x3c04, 0, 0x00000000, 0x00070000 },
4872 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4873 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4874 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4875 { 0x3c14, 0, 0x00000000, 0xffffffff },
4876 { 0x3c18, 0, 0x00000000, 0xffffffff },
4877 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4878 { 0x3c20, 0, 0xffffff00, 0x00000000 },
4879
4880 { 0x5004, 0, 0x00000000, 0x0000007f },
4881 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
4882
4883 { 0x5c00, 0, 0x00000000, 0x00000001 },
4884 { 0x5c04, 0, 0x00000000, 0x0003000f },
4885 { 0x5c08, 0, 0x00000003, 0x00000000 },
4886 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4887 { 0x5c10, 0, 0x00000000, 0xffffffff },
4888 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4889 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4890 { 0x5c88, 0, 0x00000000, 0x00077373 },
4891 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4892
4893 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4894 { 0x680c, 0, 0xffffffff, 0x00000000 },
4895 { 0x6810, 0, 0xffffffff, 0x00000000 },
4896 { 0x6814, 0, 0xffffffff, 0x00000000 },
4897 { 0x6818, 0, 0xffffffff, 0x00000000 },
4898 { 0x681c, 0, 0xffffffff, 0x00000000 },
4899 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4900 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4901 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4902 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4903 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4904 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4905 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4906 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4907 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4908 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4909 { 0x684c, 0, 0xffffffff, 0x00000000 },
4910 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4911 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4912 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4913 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4914 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4915 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4916
4917 { 0xffff, 0, 0x00000000, 0x00000000 },
4918 };
4919
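/* For each register, writing 0 must leave the read-write bits clear,
 * writing all ones must set them, and the read-only bits must keep
 * their saved value in both cases; anything else fails the test.
 */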
4920 ret = 0;
4921 is_5709 = 0;
4922 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4923 is_5709 = 1;
4924
4925 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4926 u32 offset, rw_mask, ro_mask, save_val, val;
4927 u16 flags = reg_tbl[i].flags;
4928
4929 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4930 continue;
4931
4932 offset = (u32) reg_tbl[i].offset;
4933 rw_mask = reg_tbl[i].rw_mask;
4934 ro_mask = reg_tbl[i].ro_mask;
4935
4936 save_val = readl(bp->regview + offset);
4937
4938 writel(0, bp->regview + offset);
4939
4940 val = readl(bp->regview + offset);
4941 if ((val & rw_mask) != 0) {
4942 goto reg_test_err;
4943 }
4944
4945 if ((val & ro_mask) != (save_val & ro_mask)) {
4946 goto reg_test_err;
4947 }
4948
4949 writel(0xffffffff, bp->regview + offset);
4950
4951 val = readl(bp->regview + offset);
4952 if ((val & rw_mask) != rw_mask) {
4953 goto reg_test_err;
4954 }
4955
4956 if ((val & ro_mask) != (save_val & ro_mask)) {
4957 goto reg_test_err;
4958 }
4959
4960 writel(save_val, bp->regview + offset);
4961 continue;
4962
4963 reg_test_err:
4964 writel(save_val, bp->regview + offset);
4965 ret = -ENODEV;
4966 break;
4967 }
4968 return ret;
4969 }
4970
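/* Write each test pattern to every dword in the given internal memory
 * range through the indirect register interface and read it back.
 */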
4971 static int
4972 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4973 {
4974 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4975 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4976 int i;
4977
4978 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4979 u32 offset;
4980
4981 for (offset = 0; offset < size; offset += 4) {
4982
4983 REG_WR_IND(bp, start + offset, test_pattern[i]);
4984
4985 if (REG_RD_IND(bp, start + offset) !=
4986 test_pattern[i]) {
4987 return -ENODEV;
4988 }
4989 }
4990 }
4991 return 0;
4992 }
4993
4994 static int
4995 bnx2_test_memory(struct bnx2 *bp)
4996 {
4997 int ret = 0;
4998 int i;
4999 static struct mem_entry {
5000 u32 offset;
5001 u32 len;
5002 } mem_tbl_5706[] = {
5003 { 0x60000, 0x4000 },
5004 { 0xa0000, 0x3000 },
5005 { 0xe0000, 0x4000 },
5006 { 0x120000, 0x4000 },
5007 { 0x1a0000, 0x4000 },
5008 { 0x160000, 0x4000 },
5009 { 0xffffffff, 0 },
5010 },
5011 mem_tbl_5709[] = {
5012 { 0x60000, 0x4000 },
5013 { 0xa0000, 0x3000 },
5014 { 0xe0000, 0x4000 },
5015 { 0x120000, 0x4000 },
5016 { 0x1a0000, 0x4000 },
5017 { 0xffffffff, 0 },
5018 };
5019 struct mem_entry *mem_tbl;
5020
5021 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5022 mem_tbl = mem_tbl_5709;
5023 else
5024 mem_tbl = mem_tbl_5706;
5025
5026 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5027 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5028 mem_tbl[i].len)) != 0) {
5029 return ret;
5030 }
5031 }
5032
5033 return ret;
5034 }
5035
5036 #define BNX2_MAC_LOOPBACK 0
5037 #define BNX2_PHY_LOOPBACK 1
5038
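/* Send one self-addressed test frame through the MAC or PHY loopback
 * path and verify it is received intact: one TX completion, one RX
 * completion, no l2_fhdr errors, and the expected length and payload.
 */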
5039 static int
5040 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5041 {
5042 unsigned int pkt_size, num_pkts, i;
5043 struct sk_buff *skb, *rx_skb;
5044 unsigned char *packet;
5045 u16 rx_start_idx, rx_idx;
5046 dma_addr_t map;
5047 struct tx_bd *txbd;
5048 struct sw_bd *rx_buf;
5049 struct l2_fhdr *rx_hdr;
5050 int ret = -ENODEV;
5051 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5052
5053 tx_napi = bnapi;
5054 if (bp->flags & USING_MSIX_FLAG)
5055 tx_napi = &bp->bnx2_napi[BNX2_TX_VEC];
5056
5057 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5058 bp->loopback = MAC_LOOPBACK;
5059 bnx2_set_mac_loopback(bp);
5060 }
5061 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5062 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
5063 return 0;
5064
5065 bp->loopback = PHY_LOOPBACK;
5066 bnx2_set_phy_loopback(bp);
5067 }
5068 else
5069 return -EINVAL;
5070
5071 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5072 skb = netdev_alloc_skb(bp->dev, pkt_size);
5073 if (!skb)
5074 return -ENOMEM;
5075 packet = skb_put(skb, pkt_size);
5076 memcpy(packet, bp->dev->dev_addr, 6);
5077 memset(packet + 6, 0x0, 8);
5078 for (i = 14; i < pkt_size; i++)
5079 packet[i] = (unsigned char) (i & 0xff);
5080
5081 map = pci_map_single(bp->pdev, skb->data, pkt_size,
5082 PCI_DMA_TODEVICE);
5083
5084 REG_WR(bp, BNX2_HC_COMMAND,
5085 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5086
5087 REG_RD(bp, BNX2_HC_COMMAND);
5088
5089 udelay(5);
5090 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5091
5092 num_pkts = 0;
5093
5094 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
5095
5096 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5097 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5098 txbd->tx_bd_mss_nbytes = pkt_size;
5099 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5100
5101 num_pkts++;
5102 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
5103 bp->tx_prod_bseq += pkt_size;
5104
5105 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
5106 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
5107
5108 udelay(100);
5109
5110 REG_WR(bp, BNX2_HC_COMMAND,
5111 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5112
5113 REG_RD(bp, BNX2_HC_COMMAND);
5114
5115 udelay(5);
5116
5117 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
5118 dev_kfree_skb(skb);
5119
5120 if (bnx2_get_hw_tx_cons(tx_napi) != bp->tx_prod)
5121 goto loopback_test_done;
5122
5123 rx_idx = bnx2_get_hw_rx_cons(bnapi);
5124 if (rx_idx != rx_start_idx + num_pkts) {
5125 goto loopback_test_done;
5126 }
5127
5128 rx_buf = &bp->rx_buf_ring[rx_start_idx];
5129 rx_skb = rx_buf->skb;
5130
5131 rx_hdr = (struct l2_fhdr *) rx_skb->data;
5132 skb_reserve(rx_skb, bp->rx_offset);
5133
5134 pci_dma_sync_single_for_cpu(bp->pdev,
5135 pci_unmap_addr(rx_buf, mapping),
5136 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5137
5138 if (rx_hdr->l2_fhdr_status &
5139 (L2_FHDR_ERRORS_BAD_CRC |
5140 L2_FHDR_ERRORS_PHY_DECODE |
5141 L2_FHDR_ERRORS_ALIGNMENT |
5142 L2_FHDR_ERRORS_TOO_SHORT |
5143 L2_FHDR_ERRORS_GIANT_FRAME)) {
5144
5145 goto loopback_test_done;
5146 }
5147
5148 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5149 goto loopback_test_done;
5150 }
5151
5152 for (i = 14; i < pkt_size; i++) {
5153 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5154 goto loopback_test_done;
5155 }
5156 }
5157
5158 ret = 0;
5159
5160 loopback_test_done:
5161 bp->loopback = 0;
5162 return ret;
5163 }
5164
5165 #define BNX2_MAC_LOOPBACK_FAILED 1
5166 #define BNX2_PHY_LOOPBACK_FAILED 2
5167 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5168 BNX2_PHY_LOOPBACK_FAILED)
5169
5170 static int
5171 bnx2_test_loopback(struct bnx2 *bp)
5172 {
5173 int rc = 0;
5174
5175 if (!netif_running(bp->dev))
5176 return BNX2_LOOPBACK_FAILED;
5177
5178 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5179 spin_lock_bh(&bp->phy_lock);
5180 bnx2_init_phy(bp);
5181 spin_unlock_bh(&bp->phy_lock);
5182 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5183 rc |= BNX2_MAC_LOOPBACK_FAILED;
5184 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5185 rc |= BNX2_PHY_LOOPBACK_FAILED;
5186 return rc;
5187 }
5188
5189 #define NVRAM_SIZE 0x200
5190 #define CRC32_RESIDUAL 0xdebb20e3
5191
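/* A little-endian CRC32 computed over a block that ends with its own
 * stored checksum yields the constant residual 0xdebb20e3, so each
 * 0x100-byte region can be validated without locating its CRC field.
 */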
5192 static int
5193 bnx2_test_nvram(struct bnx2 *bp)
5194 {
5195 u32 buf[NVRAM_SIZE / 4];
5196 u8 *data = (u8 *) buf;
5197 int rc = 0;
5198 u32 magic, csum;
5199
5200 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5201 goto test_nvram_done;
5202
5203 magic = be32_to_cpu(buf[0]);
5204 if (magic != 0x669955aa) {
5205 rc = -ENODEV;
5206 goto test_nvram_done;
5207 }
5208
5209 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5210 goto test_nvram_done;
5211
5212 csum = ether_crc_le(0x100, data);
5213 if (csum != CRC32_RESIDUAL) {
5214 rc = -ENODEV;
5215 goto test_nvram_done;
5216 }
5217
5218 csum = ether_crc_le(0x100, data + 0x100);
5219 if (csum != CRC32_RESIDUAL) {
5220 rc = -ENODEV;
5221 }
5222
5223 test_nvram_done:
5224 return rc;
5225 }
5226
5227 static int
5228 bnx2_test_link(struct bnx2 *bp)
5229 {
5230 u32 bmsr;
5231
5232 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5233 if (bp->link_up)
5234 return 0;
5235 return -ENODEV;
5236 }
5237 spin_lock_bh(&bp->phy_lock);
5238 bnx2_enable_bmsr1(bp);
5239 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5240 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5241 bnx2_disable_bmsr1(bp);
5242 spin_unlock_bh(&bp->phy_lock);
5243
5244 if (bmsr & BMSR_LSTATUS) {
5245 return 0;
5246 }
5247 return -ENODEV;
5248 }
5249
5250 static int
5251 bnx2_test_intr(struct bnx2 *bp)
5252 {
5253 int i;
5254 u16 status_idx;
5255
5256 if (!netif_running(bp->dev))
5257 return -ENODEV;
5258
5259 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5260
5261 /* This register is not touched during run-time. */
5262 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5263 REG_RD(bp, BNX2_HC_COMMAND);
5264
5265 for (i = 0; i < 10; i++) {
5266 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5267 status_idx) {
5268
5269 break;
5270 }
5271
5272 msleep_interruptible(10);
5273 }
5274 if (i < 10)
5275 return 0;
5276
5277 return -ENODEV;
5278 }
5279
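/* 5706 SerDes parallel-detect workaround: if autoneg has not completed
 * but the PHY shadow registers report signal detect with no incoming
 * config word, the link partner is presumably forced to 1G, so force
 * 1G full duplex locally.  Re-enable autoneg once config reappears.
 */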
5280 static void
5281 bnx2_5706_serdes_timer(struct bnx2 *bp)
5282 {
5283 spin_lock(&bp->phy_lock);
5284 if (bp->serdes_an_pending)
5285 bp->serdes_an_pending--;
5286 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5287 u32 bmcr;
5288
5289 bp->current_interval = bp->timer_interval;
5290
5291 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5292
5293 if (bmcr & BMCR_ANENABLE) {
5294 u32 phy1, phy2;
5295
5296 bnx2_write_phy(bp, 0x1c, 0x7c00);
5297 bnx2_read_phy(bp, 0x1c, &phy1);
5298
5299 bnx2_write_phy(bp, 0x17, 0x0f01);
5300 bnx2_read_phy(bp, 0x15, &phy2);
5301 bnx2_write_phy(bp, 0x17, 0x0f01);
5302 bnx2_read_phy(bp, 0x15, &phy2);
5303
5304 if ((phy1 & 0x10) && /* SIGNAL DETECT */
5305 !(phy2 & 0x20)) { /* no CONFIG */
5306
5307 bmcr &= ~BMCR_ANENABLE;
5308 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5309 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5310 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
5311 }
5312 }
5313 }
5314 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
5315 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
5316 u32 phy2;
5317
5318 bnx2_write_phy(bp, 0x17, 0x0f01);
5319 bnx2_read_phy(bp, 0x15, &phy2);
5320 if (phy2 & 0x20) {
5321 u32 bmcr;
5322
5323 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5324 bmcr |= BMCR_ANENABLE;
5325 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5326
5327 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
5328 }
5329 } else
5330 bp->current_interval = bp->timer_interval;
5331
5332 spin_unlock(&bp->phy_lock);
5333 }
5334
5335 static void
5336 bnx2_5708_serdes_timer(struct bnx2 *bp)
5337 {
5338 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
5339 return;
5340
5341 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
5342 bp->serdes_an_pending = 0;
5343 return;
5344 }
5345
5346 spin_lock(&bp->phy_lock);
5347 if (bp->serdes_an_pending)
5348 bp->serdes_an_pending--;
5349 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5350 u32 bmcr;
5351
5352 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5353 if (bmcr & BMCR_ANENABLE) {
5354 bnx2_enable_forced_2g5(bp);
5355 bp->current_interval = SERDES_FORCED_TIMEOUT;
5356 } else {
5357 bnx2_disable_forced_2g5(bp);
5358 bp->serdes_an_pending = 2;
5359 bp->current_interval = bp->timer_interval;
5360 }
5361
5362 } else
5363 bp->current_interval = bp->timer_interval;
5364
5365 spin_unlock(&bp->phy_lock);
5366 }
5367
5368 static void
5369 bnx2_timer(unsigned long data)
5370 {
5371 struct bnx2 *bp = (struct bnx2 *) data;
5372
5373 if (!netif_running(bp->dev))
5374 return;
5375
5376 if (atomic_read(&bp->intr_sem) != 0)
5377 goto bnx2_restart_timer;
5378
5379 bnx2_send_heart_beat(bp);
5380
5381 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
5382
5383 /* work around occasionally corrupted counters */
5384 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
5385 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
5386 BNX2_HC_COMMAND_STATS_NOW);
5387
5388 if (bp->phy_flags & PHY_SERDES_FLAG) {
5389 if (CHIP_NUM(bp) == CHIP_NUM_5706)
5390 bnx2_5706_serdes_timer(bp);
5391 else
5392 bnx2_5708_serdes_timer(bp);
5393 }
5394
5395 bnx2_restart_timer:
5396 mod_timer(&bp->timer, jiffies + bp->current_interval);
5397 }
5398
5399 static int
5400 bnx2_request_irq(struct bnx2 *bp)
5401 {
5402 struct net_device *dev = bp->dev;
5403 unsigned long flags;
5404 struct bnx2_irq *irq;
5405 int rc = 0, i;
5406
5407 if (bp->flags & USING_MSI_OR_MSIX_FLAG)
5408 flags = 0;
5409 else
5410 flags = IRQF_SHARED;
5411
5412 for (i = 0; i < bp->irq_nvecs; i++) {
5413 irq = &bp->irq_tbl[i];
5414 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5415 dev);
5416 if (rc)
5417 break;
5418 irq->requested = 1;
5419 }
5420 return rc;
5421 }
5422
5423 static void
5424 bnx2_free_irq(struct bnx2 *bp)
5425 {
5426 struct net_device *dev = bp->dev;
5427 struct bnx2_irq *irq;
5428 int i;
5429
5430 for (i = 0; i < bp->irq_nvecs; i++) {
5431 irq = &bp->irq_tbl[i];
5432 if (irq->requested)
5433 free_irq(irq->vector, dev);
5434 irq->requested = 0;
5435 }
5436 if (bp->flags & USING_MSI_FLAG)
5437 pci_disable_msi(bp->pdev);
5438 else if (bp->flags & USING_MSIX_FLAG)
5439 pci_disable_msix(bp->pdev);
5440
5441 bp->flags &= ~(USING_MSI_OR_MSIX_FLAG | ONE_SHOT_MSI_FLAG);
5442 }
5443
5444 static void
5445 bnx2_enable_msix(struct bnx2 *bp)
5446 {
5447 int i, rc;
5448 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
5449
5450 bnx2_setup_msix_tbl(bp);
5451 REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
5452 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
5453 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
5454
5455 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5456 msix_ent[i].entry = i;
5457 msix_ent[i].vector = 0;
5458 }
5459
5460 rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
5461 if (rc != 0)
5462 return;
5463
5464 bp->irq_tbl[BNX2_BASE_VEC].handler = bnx2_msi_1shot;
5465 bp->irq_tbl[BNX2_TX_VEC].handler = bnx2_tx_msix;
5466
5467 strcpy(bp->irq_tbl[BNX2_BASE_VEC].name, bp->dev->name);
5468 strcat(bp->irq_tbl[BNX2_BASE_VEC].name, "-base");
5469 strcpy(bp->irq_tbl[BNX2_TX_VEC].name, bp->dev->name);
5470 strcat(bp->irq_tbl[BNX2_TX_VEC].name, "-tx");
5471
5472 bp->irq_nvecs = BNX2_MAX_MSIX_VEC;
5473 bp->flags |= USING_MSIX_FLAG | ONE_SHOT_MSI_FLAG;
5474 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
5475 bp->irq_tbl[i].vector = msix_ent[i].vector;
5476 }
5477
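/* Choose the interrupt mode, preferring MSI-X, then MSI, then INTx.
 * The INTx defaults are set up first so that a failed MSI-X or MSI
 * enable still leaves a usable configuration.
 */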
5478 static void
5479 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5480 {
5481 bp->irq_tbl[0].handler = bnx2_interrupt;
5482 strcpy(bp->irq_tbl[0].name, bp->dev->name);
5483 bp->irq_nvecs = 1;
5484 bp->irq_tbl[0].vector = bp->pdev->irq;
5485
5486 if ((bp->flags & MSIX_CAP_FLAG) && !dis_msi)
5487 bnx2_enable_msix(bp);
5488
5489 if ((bp->flags & MSI_CAP_FLAG) && !dis_msi &&
5490 !(bp->flags & USING_MSIX_FLAG)) {
5491 if (pci_enable_msi(bp->pdev) == 0) {
5492 bp->flags |= USING_MSI_FLAG;
5493 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5494 bp->flags |= ONE_SHOT_MSI_FLAG;
5495 bp->irq_tbl[0].handler = bnx2_msi_1shot;
5496 } else
5497 bp->irq_tbl[0].handler = bnx2_msi;
5498
5499 bp->irq_tbl[0].vector = bp->pdev->irq;
5500 }
5501 }
5502 }
5503
5504 /* Called with rtnl_lock */
5505 static int
5506 bnx2_open(struct net_device *dev)
5507 {
5508 struct bnx2 *bp = netdev_priv(dev);
5509 int rc;
5510
5511 netif_carrier_off(dev);
5512
5513 bnx2_set_power_state(bp, PCI_D0);
5514 bnx2_disable_int(bp);
5515
5516 rc = bnx2_alloc_mem(bp);
5517 if (rc)
5518 return rc;
5519
5520 bnx2_setup_int_mode(bp, disable_msi);
5521 bnx2_napi_enable(bp);
5522 rc = bnx2_request_irq(bp);
5523
5524 if (rc) {
5525 bnx2_napi_disable(bp);
5526 bnx2_free_mem(bp);
5527 return rc;
5528 }
5529
5530 rc = bnx2_init_nic(bp);
5531
5532 if (rc) {
5533 bnx2_napi_disable(bp);
5534 bnx2_free_irq(bp);
5535 bnx2_free_skbs(bp);
5536 bnx2_free_mem(bp);
5537 return rc;
5538 }
5539
5540 mod_timer(&bp->timer, jiffies + bp->current_interval);
5541
5542 atomic_set(&bp->intr_sem, 0);
5543
5544 bnx2_enable_int(bp);
5545
5546 if (bp->flags & USING_MSI_FLAG) {
5547 /* Test MSI to make sure it is working.
5548 * If the MSI test fails, go back to INTx mode.
5549 */
5550 if (bnx2_test_intr(bp) != 0) {
5551 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5552 " using MSI, switching to INTx mode. Please"
5553 " report this failure to the PCI maintainer"
5554 " and include system chipset information.\n",
5555 bp->dev->name);
5556
5557 bnx2_disable_int(bp);
5558 bnx2_free_irq(bp);
5559
5560 bnx2_setup_int_mode(bp, 1);
5561
5562 rc = bnx2_init_nic(bp);
5563
5564 if (!rc)
5565 rc = bnx2_request_irq(bp);
5566
5567 if (rc) {
5568 bnx2_napi_disable(bp);
5569 bnx2_free_skbs(bp);
5570 bnx2_free_mem(bp);
5571 del_timer_sync(&bp->timer);
5572 return rc;
5573 }
5574 bnx2_enable_int(bp);
5575 }
5576 }
5577 if (bp->flags & USING_MSI_FLAG)
5578 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5579 else if (bp->flags & USING_MSIX_FLAG)
5580 printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);
5581
5582 netif_start_queue(dev);
5583
5584 return 0;
5585 }
5586
5587 static void
5588 bnx2_reset_task(struct work_struct *work)
5589 {
5590 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
5591
5592 if (!netif_running(bp->dev))
5593 return;
5594
5595 bp->in_reset_task = 1;
5596 bnx2_netif_stop(bp);
5597
5598 bnx2_init_nic(bp);
5599
5600 atomic_set(&bp->intr_sem, 1);
5601 bnx2_netif_start(bp);
5602 bp->in_reset_task = 0;
5603 }
5604
5605 static void
5606 bnx2_tx_timeout(struct net_device *dev)
5607 {
5608 struct bnx2 *bp = netdev_priv(dev);
5609
5610 /* This allows the netif to be shut down gracefully before resetting */
5611 schedule_work(&bp->reset_task);
5612 }
5613
5614 #ifdef BCM_VLAN
5615 /* Called with rtnl_lock */
5616 static void
5617 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
5618 {
5619 struct bnx2 *bp = netdev_priv(dev);
5620
5621 bnx2_netif_stop(bp);
5622
5623 bp->vlgrp = vlgrp;
5624 bnx2_set_rx_mode(dev);
5625
5626 bnx2_netif_start(bp);
5627 }
5628 #endif
5629
5630 /* Called with netif_tx_lock.
5631 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5632 * netif_wake_queue().
5633 */
5634 static int
5635 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5636 {
5637 struct bnx2 *bp = netdev_priv(dev);
5638 dma_addr_t mapping;
5639 struct tx_bd *txbd;
5640 struct sw_bd *tx_buf;
5641 u32 len, vlan_tag_flags, last_frag, mss;
5642 u16 prod, ring_prod;
5643 int i;
5644 struct bnx2_napi *bnapi = &bp->bnx2_napi[bp->tx_vec];
5645
5646 if (unlikely(bnx2_tx_avail(bp, bnapi) <
5647 (skb_shinfo(skb)->nr_frags + 1))) {
5648 netif_stop_queue(dev);
5649 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
5650 dev->name);
5651
5652 return NETDEV_TX_BUSY;
5653 }
5654 len = skb_headlen(skb);
5655 prod = bp->tx_prod;
5656 ring_prod = TX_RING_IDX(prod);
5657
5658 vlan_tag_flags = 0;
5659 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5660 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
5661 }
5662
5663 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
5664 vlan_tag_flags |=
5665 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
5666 }
5667 if ((mss = skb_shinfo(skb)->gso_size)) {
5668 u32 tcp_opt_len, ip_tcp_len;
5669 struct iphdr *iph;
5670
5671 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
5672
5673 tcp_opt_len = tcp_optlen(skb);
5674
5675 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
5676 u32 tcp_off = skb_transport_offset(skb) -
5677 sizeof(struct ipv6hdr) - ETH_HLEN;
5678
5679 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
5680 TX_BD_FLAGS_SW_FLAGS;
5681 if (likely(tcp_off == 0))
5682 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
5683 else {
5684 tcp_off >>= 3;
5685 vlan_tag_flags |= ((tcp_off & 0x3) <<
5686 TX_BD_FLAGS_TCP6_OFF0_SHL) |
5687 ((tcp_off & 0x10) <<
5688 TX_BD_FLAGS_TCP6_OFF4_SHL);
5689 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
5690 }
5691 } else {
5692 if (skb_header_cloned(skb) &&
5693 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5694 dev_kfree_skb(skb);
5695 return NETDEV_TX_OK;
5696 }
5697
5698 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5699
5700 iph = ip_hdr(skb);
5701 iph->check = 0;
5702 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5703 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5704 iph->daddr, 0,
5705 IPPROTO_TCP,
5706 0);
5707 if (tcp_opt_len || (iph->ihl > 5)) {
5708 vlan_tag_flags |= ((iph->ihl - 5) +
5709 (tcp_opt_len >> 2)) << 8;
5710 }
5711 }
5712 } else
5713 mss = 0;
5714
5715 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5716
5717 tx_buf = &bp->tx_buf_ring[ring_prod];
5718 tx_buf->skb = skb;
5719 pci_unmap_addr_set(tx_buf, mapping, mapping);
5720
5721 txbd = &bp->tx_desc_ring[ring_prod];
5722
5723 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5724 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5725 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5726 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
5727
5728 last_frag = skb_shinfo(skb)->nr_frags;
5729
5730 for (i = 0; i < last_frag; i++) {
5731 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5732
5733 prod = NEXT_TX_BD(prod);
5734 ring_prod = TX_RING_IDX(prod);
5735 txbd = &bp->tx_desc_ring[ring_prod];
5736
5737 len = frag->size;
5738 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
5739 len, PCI_DMA_TODEVICE);
5740 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
5741 mapping, mapping);
5742
5743 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5744 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5745 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5746 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
5747
5748 }
5749 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
5750
5751 prod = NEXT_TX_BD(prod);
5752 bp->tx_prod_bseq += skb->len;
5753
5754 REG_WR16(bp, bp->tx_bidx_addr, prod);
5755 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
5756
5757 mmiowb();
5758
5759 bp->tx_prod = prod;
5760 dev->trans_start = jiffies;
5761
5762 if (unlikely(bnx2_tx_avail(bp, bnapi) <= MAX_SKB_FRAGS)) {
5763 netif_stop_queue(dev);
5764 if (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)
5765 netif_wake_queue(dev);
5766 }
5767
5768 return NETDEV_TX_OK;
5769 }
5770
5771 /* Called with rtnl_lock */
5772 static int
5773 bnx2_close(struct net_device *dev)
5774 {
5775 struct bnx2 *bp = netdev_priv(dev);
5776 u32 reset_code;
5777
5778 /* Calling flush_scheduled_work() may deadlock because
5779 * linkwatch_event() may be on the workqueue and it will try to get
5780 * the rtnl_lock which we are holding.
5781 */
5782 while (bp->in_reset_task)
5783 msleep(1);
5784
5785 bnx2_disable_int_sync(bp);
5786 bnx2_napi_disable(bp);
5787 del_timer_sync(&bp->timer);
5788 if (bp->flags & NO_WOL_FLAG)
5789 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5790 else if (bp->wol)
5791 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5792 else
5793 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5794 bnx2_reset_chip(bp, reset_code);
5795 bnx2_free_irq(bp);
5796 bnx2_free_skbs(bp);
5797 bnx2_free_mem(bp);
5798 bp->link_up = 0;
5799 netif_carrier_off(bp->dev);
5800 bnx2_set_power_state(bp, PCI_D3hot);
5801 return 0;
5802 }
5803
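/* Hardware counters are kept as 32-bit hi/lo halves.  On 64-bit hosts
 * combine both halves; on 32-bit hosts report only the low half, since
 * unsigned long cannot hold the full 64-bit value.
 */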
5804 #define GET_NET_STATS64(ctr) \
5805 ((unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
5806 (unsigned long) (ctr##_lo))
5807
5808 #define GET_NET_STATS32(ctr) \
5809 (ctr##_lo)
5810
5811 #if (BITS_PER_LONG == 64)
5812 #define GET_NET_STATS GET_NET_STATS64
5813 #else
5814 #define GET_NET_STATS GET_NET_STATS32
5815 #endif
5816
5817 static struct net_device_stats *
5818 bnx2_get_stats(struct net_device *dev)
5819 {
5820 struct bnx2 *bp = netdev_priv(dev);
5821 struct statistics_block *stats_blk = bp->stats_blk;
5822 struct net_device_stats *net_stats = &bp->net_stats;
5823
5824 if (bp->stats_blk == NULL) {
5825 return net_stats;
5826 }
5827 net_stats->rx_packets =
5828 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5829 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5830 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5831
5832 net_stats->tx_packets =
5833 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5834 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5835 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5836
5837 net_stats->rx_bytes =
5838 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5839
5840 net_stats->tx_bytes =
5841 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5842
5843 net_stats->multicast =
5844 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5845
5846 net_stats->collisions =
5847 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5848
5849 net_stats->rx_length_errors =
5850 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5851 stats_blk->stat_EtherStatsOverrsizePkts);
5852
5853 net_stats->rx_over_errors =
5854 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5855
5856 net_stats->rx_frame_errors =
5857 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5858
5859 net_stats->rx_crc_errors =
5860 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5861
5862 net_stats->rx_errors = net_stats->rx_length_errors +
5863 net_stats->rx_over_errors + net_stats->rx_frame_errors +
5864 net_stats->rx_crc_errors;
5865
5866 net_stats->tx_aborted_errors =
5867 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
5868 stats_blk->stat_Dot3StatsLateCollisions);
5869
5870 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5871 (CHIP_ID(bp) == CHIP_ID_5708_A0))
5872 net_stats->tx_carrier_errors = 0;
5873 else {
5874 net_stats->tx_carrier_errors =
5875 (unsigned long)
5876 stats_blk->stat_Dot3StatsCarrierSenseErrors;
5877 }
5878
5879 net_stats->tx_errors =
5880 (unsigned long)
5881 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
5882 +
5883 net_stats->tx_aborted_errors +
5884 net_stats->tx_carrier_errors;
5885
5886 net_stats->rx_missed_errors =
5887 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
5888 stats_blk->stat_FwRxDrop);
5889
5890 return net_stats;
5891 }
5892
5893 /* All ethtool functions called with rtnl_lock */
5894
5895 static int
5896 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5897 {
5898 struct bnx2 *bp = netdev_priv(dev);
5899 int support_serdes = 0, support_copper = 0;
5900
5901 cmd->supported = SUPPORTED_Autoneg;
5902 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5903 support_serdes = 1;
5904 support_copper = 1;
5905 } else if (bp->phy_port == PORT_FIBRE)
5906 support_serdes = 1;
5907 else
5908 support_copper = 1;
5909
5910 if (support_serdes) {
5911 cmd->supported |= SUPPORTED_1000baseT_Full |
5912 SUPPORTED_FIBRE;
5913 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5914 cmd->supported |= SUPPORTED_2500baseX_Full;
5915
5916 }
5917 if (support_copper) {
5918 cmd->supported |= SUPPORTED_10baseT_Half |
5919 SUPPORTED_10baseT_Full |
5920 SUPPORTED_100baseT_Half |
5921 SUPPORTED_100baseT_Full |
5922 SUPPORTED_1000baseT_Full |
5923 SUPPORTED_TP;
5924
5925 }
5926
5927 spin_lock_bh(&bp->phy_lock);
5928 cmd->port = bp->phy_port;
5929 cmd->advertising = bp->advertising;
5930
5931 if (bp->autoneg & AUTONEG_SPEED) {
5932 cmd->autoneg = AUTONEG_ENABLE;
5933 }
5934 else {
5935 cmd->autoneg = AUTONEG_DISABLE;
5936 }
5937
5938 if (netif_carrier_ok(dev)) {
5939 cmd->speed = bp->line_speed;
5940 cmd->duplex = bp->duplex;
5941 }
5942 else {
5943 cmd->speed = -1;
5944 cmd->duplex = -1;
5945 }
5946 spin_unlock_bh(&bp->phy_lock);
5947
5948 cmd->transceiver = XCVR_INTERNAL;
5949 cmd->phy_address = bp->phy_addr;
5950
5951 return 0;
5952 }
5953
5954 static int
5955 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5956 {
5957 struct bnx2 *bp = netdev_priv(dev);
5958 u8 autoneg = bp->autoneg;
5959 u8 req_duplex = bp->req_duplex;
5960 u16 req_line_speed = bp->req_line_speed;
5961 u32 advertising = bp->advertising;
5962 int err = -EINVAL;
5963
5964 spin_lock_bh(&bp->phy_lock);
5965
5966 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
5967 goto err_out_unlock;
5968
5969 if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
5970 goto err_out_unlock;
5971
5972 if (cmd->autoneg == AUTONEG_ENABLE) {
5973 autoneg |= AUTONEG_SPEED;
5974
5975 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
5976
5977 /* allow advertising 1 speed */
5978 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
5979 (cmd->advertising == ADVERTISED_10baseT_Full) ||
5980 (cmd->advertising == ADVERTISED_100baseT_Half) ||
5981 (cmd->advertising == ADVERTISED_100baseT_Full)) {
5982
5983 if (cmd->port == PORT_FIBRE)
5984 goto err_out_unlock;
5985
5986 advertising = cmd->advertising;
5987
5988 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
5989 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
5990 (cmd->port == PORT_TP))
5991 goto err_out_unlock;
5992 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
5993 advertising = cmd->advertising;
5994 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
5995 goto err_out_unlock;
5996 else {
5997 if (cmd->port == PORT_FIBRE)
5998 advertising = ETHTOOL_ALL_FIBRE_SPEED;
5999 else
6000 advertising = ETHTOOL_ALL_COPPER_SPEED;
6001 }
6002 advertising |= ADVERTISED_Autoneg;
6003 }
6004 else {
6005 if (cmd->port == PORT_FIBRE) {
6006 if ((cmd->speed != SPEED_1000 &&
6007 cmd->speed != SPEED_2500) ||
6008 (cmd->duplex != DUPLEX_FULL))
6009 goto err_out_unlock;
6010
6011 if (cmd->speed == SPEED_2500 &&
6012 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
6013 goto err_out_unlock;
6014 }
6015 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
6016 goto err_out_unlock;
6017
6018 autoneg &= ~AUTONEG_SPEED;
6019 req_line_speed = cmd->speed;
6020 req_duplex = cmd->duplex;
6021 advertising = 0;
6022 }
6023
6024 bp->autoneg = autoneg;
6025 bp->advertising = advertising;
6026 bp->req_line_speed = req_line_speed;
6027 bp->req_duplex = req_duplex;
6028
6029 err = bnx2_setup_phy(bp, cmd->port);
6030
6031 err_out_unlock:
6032 spin_unlock_bh(&bp->phy_lock);
6033
6034 return err;
6035 }
6036
6037 static void
6038 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6039 {
6040 struct bnx2 *bp = netdev_priv(dev);
6041
6042 strcpy(info->driver, DRV_MODULE_NAME);
6043 strcpy(info->version, DRV_MODULE_VERSION);
6044 strcpy(info->bus_info, pci_name(bp->pdev));
6045 strcpy(info->fw_version, bp->fw_version);
6046 }
6047
6048 #define BNX2_REGDUMP_LEN (32 * 1024)
6049
6050 static int
6051 bnx2_get_regs_len(struct net_device *dev)
6052 {
6053 return BNX2_REGDUMP_LEN;
6054 }
6055
6056 static void
6057 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6058 {
6059 u32 *p = _p, i, offset;
6060 u8 *orig_p = _p;
6061 struct bnx2 *bp = netdev_priv(dev);
6062 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6063 0x0800, 0x0880, 0x0c00, 0x0c10,
6064 0x0c30, 0x0d08, 0x1000, 0x101c,
6065 0x1040, 0x1048, 0x1080, 0x10a4,
6066 0x1400, 0x1490, 0x1498, 0x14f0,
6067 0x1500, 0x155c, 0x1580, 0x15dc,
6068 0x1600, 0x1658, 0x1680, 0x16d8,
6069 0x1800, 0x1820, 0x1840, 0x1854,
6070 0x1880, 0x1894, 0x1900, 0x1984,
6071 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6072 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6073 0x2000, 0x2030, 0x23c0, 0x2400,
6074 0x2800, 0x2820, 0x2830, 0x2850,
6075 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6076 0x3c00, 0x3c94, 0x4000, 0x4010,
6077 0x4080, 0x4090, 0x43c0, 0x4458,
6078 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6079 0x4fc0, 0x5010, 0x53c0, 0x5444,
6080 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6081 0x5fc0, 0x6000, 0x6400, 0x6428,
6082 0x6800, 0x6848, 0x684c, 0x6860,
6083 0x6888, 0x6910, 0x8000 };
6084
6085 regs->version = 0;
6086
6087 memset(p, 0, BNX2_REGDUMP_LEN);
6088
6089 if (!netif_running(bp->dev))
6090 return;
6091
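/* reg_boundaries lists [start, end) pairs of register ranges that are
 * safe to read; the gaps between pairs are skipped and stay zeroed in
 * the dump buffer.
 */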
6092 i = 0;
6093 offset = reg_boundaries[0];
6094 p += offset;
6095 while (offset < BNX2_REGDUMP_LEN) {
6096 *p++ = REG_RD(bp, offset);
6097 offset += 4;
6098 if (offset == reg_boundaries[i + 1]) {
6099 offset = reg_boundaries[i + 2];
6100 p = (u32 *) (orig_p + offset);
6101 i += 2;
6102 }
6103 }
6104 }
6105
6106 static void
6107 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6108 {
6109 struct bnx2 *bp = netdev_priv(dev);
6110
6111 if (bp->flags & NO_WOL_FLAG) {
6112 wol->supported = 0;
6113 wol->wolopts = 0;
6114 }
6115 else {
6116 wol->supported = WAKE_MAGIC;
6117 if (bp->wol)
6118 wol->wolopts = WAKE_MAGIC;
6119 else
6120 wol->wolopts = 0;
6121 }
6122 memset(&wol->sopass, 0, sizeof(wol->sopass));
6123 }
6124
6125 static int
6126 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6127 {
6128 struct bnx2 *bp = netdev_priv(dev);
6129
6130 if (wol->wolopts & ~WAKE_MAGIC)
6131 return -EINVAL;
6132
6133 if (wol->wolopts & WAKE_MAGIC) {
6134 if (bp->flags & NO_WOL_FLAG)
6135 return -EINVAL;
6136
6137 bp->wol = 1;
6138 }
6139 else {
6140 bp->wol = 0;
6141 }
6142 return 0;
6143 }
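/* Only magic-packet wake-up is supported; from userspace this
 * corresponds to e.g. "ethtool -s ethX wol g" to enable and
 * "ethtool -s ethX wol d" to disable. */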
6144
6145 static int
6146 bnx2_nway_reset(struct net_device *dev)
6147 {
6148 struct bnx2 *bp = netdev_priv(dev);
6149 u32 bmcr;
6150
6151 if (!(bp->autoneg & AUTONEG_SPEED)) {
6152 return -EINVAL;
6153 }
6154
6155 spin_lock_bh(&bp->phy_lock);
6156
6157 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
6158 int rc;
6159
6160 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
6161 spin_unlock_bh(&bp->phy_lock);
6162 return rc;
6163 }
6164
6165 	/* Force a link-down event that is visible to the link partner */
6166 if (bp->phy_flags & PHY_SERDES_FLAG) {
6167 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
6168 spin_unlock_bh(&bp->phy_lock);
6169
6170 msleep(20);
6171
6172 spin_lock_bh(&bp->phy_lock);
6173
6174 bp->current_interval = SERDES_AN_TIMEOUT;
6175 bp->serdes_an_pending = 1;
6176 mod_timer(&bp->timer, jiffies + bp->current_interval);
6177 }
6178
6179 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6180 bmcr &= ~BMCR_LOOPBACK;
6181 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
6182
6183 spin_unlock_bh(&bp->phy_lock);
6184
6185 return 0;
6186 }
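/* Entry point for "ethtool -r ethX". On SerDes PHYs the link is first
 * forced down via BMCR_LOOPBACK so the link partner sees the restart;
 * copper PHYs simply get BMCR_ANRESTART | BMCR_ANENABLE. */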
6187
6188 static int
6189 bnx2_get_eeprom_len(struct net_device *dev)
6190 {
6191 struct bnx2 *bp = netdev_priv(dev);
6192
6193 if (bp->flash_info == NULL)
6194 return 0;
6195
6196 return (int) bp->flash_size;
6197 }
6198
6199 static int
6200 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6201 u8 *eebuf)
6202 {
6203 struct bnx2 *bp = netdev_priv(dev);
6204 int rc;
6205
6206 /* parameters already validated in ethtool_get_eeprom */
6207
6208 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6209
6210 return rc;
6211 }
6212
6213 static int
6214 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6215 u8 *eebuf)
6216 {
6217 struct bnx2 *bp = netdev_priv(dev);
6218 int rc;
6219
6220 /* parameters already validated in ethtool_set_eeprom */
6221
6222 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6223
6224 return rc;
6225 }
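/* NVRAM is exposed through the ethtool EEPROM interface:
 * "ethtool -e ethX" reads and "ethtool -E ethX" writes. Offset and
 * length are validated by the ethtool core before these handlers run. */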
6226
6227 static int
6228 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6229 {
6230 struct bnx2 *bp = netdev_priv(dev);
6231
6232 memset(coal, 0, sizeof(struct ethtool_coalesce));
6233
6234 coal->rx_coalesce_usecs = bp->rx_ticks;
6235 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6236 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6237 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6238
6239 coal->tx_coalesce_usecs = bp->tx_ticks;
6240 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6241 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6242 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6243
6244 coal->stats_block_coalesce_usecs = bp->stats_ticks;
6245
6246 return 0;
6247 }
6248
6249 static int
6250 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6251 {
6252 struct bnx2 *bp = netdev_priv(dev);
6253
6254 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6255 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6256
6257 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
6258 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6259
6260 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6261 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6262
6263 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6264 if (bp->rx_quick_cons_trip_int > 0xff)
6265 bp->rx_quick_cons_trip_int = 0xff;
6266
6267 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6268 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6269
6270 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6271 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6272
6273 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6274 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6275
6276 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6277 	if (bp->tx_quick_cons_trip_int > 0xff)
6278 		bp->tx_quick_cons_trip_int = 0xff;
6279
6280 bp->stats_ticks = coal->stats_block_coalesce_usecs;
6281 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6282 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6283 bp->stats_ticks = USEC_PER_SEC;
6284 }
6285 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6286 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6287 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6288
6289 if (netif_running(bp->dev)) {
6290 bnx2_netif_stop(bp);
6291 bnx2_init_nic(bp);
6292 bnx2_netif_start(bp);
6293 }
6294
6295 return 0;
6296 }
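/* Tick values are clamped to the 10-bit hardware fields and frame
 * counts to 8 bits; e.g. "ethtool -C ethX rx-usecs 18 rx-frames 6"
 * sets rx_ticks and rx_quick_cons_trip. A running NIC is restarted so
 * the new host-coalescing values take effect. */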
6297
6298 static void
6299 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6300 {
6301 struct bnx2 *bp = netdev_priv(dev);
6302
6303 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
6304 ering->rx_mini_max_pending = 0;
6305 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
6306
6307 ering->rx_pending = bp->rx_ring_size;
6308 ering->rx_mini_pending = 0;
6309 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
6310
6311 ering->tx_max_pending = MAX_TX_DESC_CNT;
6312 ering->tx_pending = bp->tx_ring_size;
6313 }
6314
6315 static int
6316 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
6317 {
6318 if (netif_running(bp->dev)) {
6319 bnx2_netif_stop(bp);
6320 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6321 bnx2_free_skbs(bp);
6322 bnx2_free_mem(bp);
6323 }
6324
6325 bnx2_set_rx_ring_size(bp, rx);
6326 bp->tx_ring_size = tx;
6327
6328 if (netif_running(bp->dev)) {
6329 int rc;
6330
6331 rc = bnx2_alloc_mem(bp);
6332 if (rc)
6333 return rc;
6334 bnx2_init_nic(bp);
6335 bnx2_netif_start(bp);
6336 }
6337 return 0;
6338 }
6339
6340 static int
6341 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6342 {
6343 struct bnx2 *bp = netdev_priv(dev);
6344 int rc;
6345
6346 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6347 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6348 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6349
6350 return -EINVAL;
6351 }
6352 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6353 return rc;
6354 }
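/* Entry point for e.g. "ethtool -G ethX rx 255 tx 255". tx_pending
 * must exceed MAX_SKB_FRAGS so that a maximally fragmented skb can
 * always be placed on the tx ring. */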
6355
6356 static void
6357 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6358 {
6359 struct bnx2 *bp = netdev_priv(dev);
6360
6361 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6362 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6363 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6364 }
6365
6366 static int
6367 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6368 {
6369 struct bnx2 *bp = netdev_priv(dev);
6370
6371 bp->req_flow_ctrl = 0;
6372 if (epause->rx_pause)
6373 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6374 if (epause->tx_pause)
6375 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6376
6377 if (epause->autoneg) {
6378 bp->autoneg |= AUTONEG_FLOW_CTRL;
6379 }
6380 else {
6381 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6382 }
6383
6384 spin_lock_bh(&bp->phy_lock);
6385
6386 bnx2_setup_phy(bp, bp->phy_port);
6387
6388 spin_unlock_bh(&bp->phy_lock);
6389
6390 return 0;
6391 }
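/* Entry point for e.g. "ethtool -A ethX autoneg on rx on tx on"; the
 * requested flow control is applied immediately by re-running the PHY
 * setup under phy_lock. */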
6392
6393 static u32
6394 bnx2_get_rx_csum(struct net_device *dev)
6395 {
6396 struct bnx2 *bp = netdev_priv(dev);
6397
6398 return bp->rx_csum;
6399 }
6400
6401 static int
6402 bnx2_set_rx_csum(struct net_device *dev, u32 data)
6403 {
6404 struct bnx2 *bp = netdev_priv(dev);
6405
6406 bp->rx_csum = data;
6407 return 0;
6408 }
6409
6410 static int
6411 bnx2_set_tso(struct net_device *dev, u32 data)
6412 {
6413 struct bnx2 *bp = netdev_priv(dev);
6414
6415 if (data) {
6416 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6417 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6418 dev->features |= NETIF_F_TSO6;
6419 } else
6420 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6421 NETIF_F_TSO_ECN);
6422 return 0;
6423 }
6424
6425 #define BNX2_NUM_STATS 46
6426
6427 static struct {
6428 char string[ETH_GSTRING_LEN];
6429 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
6430 { "rx_bytes" },
6431 { "rx_error_bytes" },
6432 { "tx_bytes" },
6433 { "tx_error_bytes" },
6434 { "rx_ucast_packets" },
6435 { "rx_mcast_packets" },
6436 { "rx_bcast_packets" },
6437 { "tx_ucast_packets" },
6438 { "tx_mcast_packets" },
6439 { "tx_bcast_packets" },
6440 { "tx_mac_errors" },
6441 { "tx_carrier_errors" },
6442 { "rx_crc_errors" },
6443 { "rx_align_errors" },
6444 { "tx_single_collisions" },
6445 { "tx_multi_collisions" },
6446 { "tx_deferred" },
6447 { "tx_excess_collisions" },
6448 { "tx_late_collisions" },
6449 { "tx_total_collisions" },
6450 { "rx_fragments" },
6451 { "rx_jabbers" },
6452 { "rx_undersize_packets" },
6453 { "rx_oversize_packets" },
6454 { "rx_64_byte_packets" },
6455 { "rx_65_to_127_byte_packets" },
6456 { "rx_128_to_255_byte_packets" },
6457 { "rx_256_to_511_byte_packets" },
6458 { "rx_512_to_1023_byte_packets" },
6459 { "rx_1024_to_1522_byte_packets" },
6460 { "rx_1523_to_9022_byte_packets" },
6461 { "tx_64_byte_packets" },
6462 { "tx_65_to_127_byte_packets" },
6463 { "tx_128_to_255_byte_packets" },
6464 { "tx_256_to_511_byte_packets" },
6465 { "tx_512_to_1023_byte_packets" },
6466 { "tx_1024_to_1522_byte_packets" },
6467 { "tx_1523_to_9022_byte_packets" },
6468 { "rx_xon_frames" },
6469 { "rx_xoff_frames" },
6470 { "tx_xon_frames" },
6471 { "tx_xoff_frames" },
6472 { "rx_mac_ctrl_frames" },
6473 { "rx_filtered_packets" },
6474 { "rx_discards" },
6475 { "rx_fw_discards" },
6476 };
6477
6478 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
6479
6480 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
6481 STATS_OFFSET32(stat_IfHCInOctets_hi),
6482 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
6483 STATS_OFFSET32(stat_IfHCOutOctets_hi),
6484 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
6485 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
6486 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
6487 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
6488 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
6489 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
6490 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
6491 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
6492 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
6493 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
6494 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
6495 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
6496 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
6497 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
6498 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
6499 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
6500 STATS_OFFSET32(stat_EtherStatsCollisions),
6501 STATS_OFFSET32(stat_EtherStatsFragments),
6502 STATS_OFFSET32(stat_EtherStatsJabbers),
6503 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
6504 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
6505 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
6506 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
6507 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
6508 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
6509 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
6510 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
6511 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
6512 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
6513 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
6514 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
6515 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
6516 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
6517 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
6518 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
6519 STATS_OFFSET32(stat_XonPauseFramesReceived),
6520 STATS_OFFSET32(stat_XoffPauseFramesReceived),
6521 STATS_OFFSET32(stat_OutXonSent),
6522 STATS_OFFSET32(stat_OutXoffSent),
6523 STATS_OFFSET32(stat_MacControlFramesReceived),
6524 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
6525 STATS_OFFSET32(stat_IfInMBUFDiscards),
6526 STATS_OFFSET32(stat_FwRxDrop),
6527 };
6528
6529 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
6530 * skipped because of errata.
6531 */
6532 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
6533 8,0,8,8,8,8,8,8,8,8,
6534 4,0,4,4,4,4,4,4,4,4,
6535 4,4,4,4,4,4,4,4,4,4,
6536 4,4,4,4,4,4,4,4,4,4,
6537 4,4,4,4,4,4,
6538 };
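/* The zero entries (indices 1 and 11: rx_error_bytes and
 * tx_carrier_errors) are the two counters skipped by the errata noted
 * above; bnx2_get_ethtool_stats() reports them as 0. */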
6539
6540 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
6541 8,0,8,8,8,8,8,8,8,8,
6542 4,4,4,4,4,4,4,4,4,4,
6543 4,4,4,4,4,4,4,4,4,4,
6544 4,4,4,4,4,4,4,4,4,4,
6545 4,4,4,4,4,4,
6546 };
6547
6548 #define BNX2_NUM_TESTS 6
6549
6550 static struct {
6551 char string[ETH_GSTRING_LEN];
6552 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
6553 { "register_test (offline)" },
6554 { "memory_test (offline)" },
6555 { "loopback_test (offline)" },
6556 { "nvram_test (online)" },
6557 { "interrupt_test (online)" },
6558 { "link_test (online)" },
6559 };
6560
6561 static int
6562 bnx2_get_sset_count(struct net_device *dev, int sset)
6563 {
6564 switch (sset) {
6565 case ETH_SS_TEST:
6566 return BNX2_NUM_TESTS;
6567 case ETH_SS_STATS:
6568 return BNX2_NUM_STATS;
6569 default:
6570 return -EOPNOTSUPP;
6571 }
6572 }
6573
6574 static void
6575 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6576 {
6577 struct bnx2 *bp = netdev_priv(dev);
6578
6579 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6580 if (etest->flags & ETH_TEST_FL_OFFLINE) {
6581 int i;
6582
6583 bnx2_netif_stop(bp);
6584 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6585 bnx2_free_skbs(bp);
6586
6587 if (bnx2_test_registers(bp) != 0) {
6588 buf[0] = 1;
6589 etest->flags |= ETH_TEST_FL_FAILED;
6590 }
6591 if (bnx2_test_memory(bp) != 0) {
6592 buf[1] = 1;
6593 etest->flags |= ETH_TEST_FL_FAILED;
6594 }
6595 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
6596 etest->flags |= ETH_TEST_FL_FAILED;
6597
6598 if (!netif_running(bp->dev)) {
6599 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6600 }
6601 else {
6602 bnx2_init_nic(bp);
6603 bnx2_netif_start(bp);
6604 }
6605
6606 /* wait for link up */
6607 for (i = 0; i < 7; i++) {
6608 if (bp->link_up)
6609 break;
6610 msleep_interruptible(1000);
6611 }
6612 }
6613
6614 if (bnx2_test_nvram(bp) != 0) {
6615 buf[3] = 1;
6616 etest->flags |= ETH_TEST_FL_FAILED;
6617 }
6618 if (bnx2_test_intr(bp) != 0) {
6619 buf[4] = 1;
6620 etest->flags |= ETH_TEST_FL_FAILED;
6621 }
6622
6623 if (bnx2_test_link(bp) != 0) {
6624 buf[5] = 1;
6625 etest->flags |= ETH_TEST_FL_FAILED;
6626
6627 }
6628 }
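/* Entry point for "ethtool -t ethX [offline]". buf[] maps 1:1 onto
 * bnx2_tests_str_arr, nonzero meaning failure. The offline tests reset
 * the chip, so up to 7 seconds are allowed for the link to recover. */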
6629
6630 static void
6631 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6632 {
6633 switch (stringset) {
6634 case ETH_SS_STATS:
6635 memcpy(buf, bnx2_stats_str_arr,
6636 sizeof(bnx2_stats_str_arr));
6637 break;
6638 case ETH_SS_TEST:
6639 memcpy(buf, bnx2_tests_str_arr,
6640 sizeof(bnx2_tests_str_arr));
6641 break;
6642 }
6643 }
6644
6645 static void
6646 bnx2_get_ethtool_stats(struct net_device *dev,
6647 struct ethtool_stats *stats, u64 *buf)
6648 {
6649 struct bnx2 *bp = netdev_priv(dev);
6650 int i;
6651 u32 *hw_stats = (u32 *) bp->stats_blk;
6652 u8 *stats_len_arr = NULL;
6653
6654 if (hw_stats == NULL) {
6655 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6656 return;
6657 }
6658
6659 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6660 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6661 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6662 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6663 stats_len_arr = bnx2_5706_stats_len_arr;
6664 else
6665 stats_len_arr = bnx2_5708_stats_len_arr;
6666
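	/* Each counter is skipped (len 0), 32 bits (len 4), or 64 bits
	 * (len 8); 64-bit counters are stored as two consecutive 32-bit
	 * words with the high word first. */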
6667 for (i = 0; i < BNX2_NUM_STATS; i++) {
6668 if (stats_len_arr[i] == 0) {
6669 /* skip this counter */
6670 buf[i] = 0;
6671 continue;
6672 }
6673 if (stats_len_arr[i] == 4) {
6674 /* 4-byte counter */
6675 buf[i] = (u64)
6676 *(hw_stats + bnx2_stats_offset_arr[i]);
6677 continue;
6678 }
6679 /* 8-byte counter */
6680 buf[i] = (((u64) *(hw_stats +
6681 bnx2_stats_offset_arr[i])) << 32) +
6682 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6683 }
6684 }
6685
6686 static int
6687 bnx2_phys_id(struct net_device *dev, u32 data)
6688 {
6689 struct bnx2 *bp = netdev_priv(dev);
6690 int i;
6691 u32 save;
6692
6693 if (data == 0)
6694 data = 2;
6695
6696 save = REG_RD(bp, BNX2_MISC_CFG);
6697 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6698
6699 for (i = 0; i < (data * 2); i++) {
6700 if ((i % 2) == 0) {
6701 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6702 }
6703 else {
6704 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6705 BNX2_EMAC_LED_1000MB_OVERRIDE |
6706 BNX2_EMAC_LED_100MB_OVERRIDE |
6707 BNX2_EMAC_LED_10MB_OVERRIDE |
6708 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6709 BNX2_EMAC_LED_TRAFFIC);
6710 }
6711 msleep_interruptible(500);
6712 if (signal_pending(current))
6713 break;
6714 }
6715 REG_WR(bp, BNX2_EMAC_LED, 0);
6716 REG_WR(bp, BNX2_MISC_CFG, save);
6717 return 0;
6718 }
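/* Entry point for "ethtool -p ethX N": the port LED is toggled every
 * 500 ms, so N roughly equals the number of seconds to blink,
 * defaulting to 2 when N is 0. */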
6719
6720 static int
6721 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6722 {
6723 struct bnx2 *bp = netdev_priv(dev);
6724
6725 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6726 		return ethtool_op_set_tx_ipv6_csum(dev, data);
6727 	else
6728 		return ethtool_op_set_tx_csum(dev, data);
6729 }
6730
6731 static const struct ethtool_ops bnx2_ethtool_ops = {
6732 .get_settings = bnx2_get_settings,
6733 .set_settings = bnx2_set_settings,
6734 .get_drvinfo = bnx2_get_drvinfo,
6735 .get_regs_len = bnx2_get_regs_len,
6736 .get_regs = bnx2_get_regs,
6737 .get_wol = bnx2_get_wol,
6738 .set_wol = bnx2_set_wol,
6739 .nway_reset = bnx2_nway_reset,
6740 .get_link = ethtool_op_get_link,
6741 .get_eeprom_len = bnx2_get_eeprom_len,
6742 .get_eeprom = bnx2_get_eeprom,
6743 .set_eeprom = bnx2_set_eeprom,
6744 .get_coalesce = bnx2_get_coalesce,
6745 .set_coalesce = bnx2_set_coalesce,
6746 .get_ringparam = bnx2_get_ringparam,
6747 .set_ringparam = bnx2_set_ringparam,
6748 .get_pauseparam = bnx2_get_pauseparam,
6749 .set_pauseparam = bnx2_set_pauseparam,
6750 .get_rx_csum = bnx2_get_rx_csum,
6751 .set_rx_csum = bnx2_set_rx_csum,
6752 .set_tx_csum = bnx2_set_tx_csum,
6753 .set_sg = ethtool_op_set_sg,
6754 .set_tso = bnx2_set_tso,
6755 .self_test = bnx2_self_test,
6756 .get_strings = bnx2_get_strings,
6757 .phys_id = bnx2_phys_id,
6758 .get_ethtool_stats = bnx2_get_ethtool_stats,
6759 .get_sset_count = bnx2_get_sset_count,
6760 };
6761
6762 /* Called with rtnl_lock */
6763 static int
6764 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6765 {
6766 struct mii_ioctl_data *data = if_mii(ifr);
6767 struct bnx2 *bp = netdev_priv(dev);
6768 int err;
6769
6770 switch(cmd) {
6771 case SIOCGMIIPHY:
6772 data->phy_id = bp->phy_addr;
6773
6774 /* fallthru */
6775 case SIOCGMIIREG: {
6776 u32 mii_regval;
6777
6778 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6779 return -EOPNOTSUPP;
6780
6781 if (!netif_running(dev))
6782 return -EAGAIN;
6783
6784 spin_lock_bh(&bp->phy_lock);
6785 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
6786 spin_unlock_bh(&bp->phy_lock);
6787
6788 data->val_out = mii_regval;
6789
6790 return err;
6791 }
6792
6793 case SIOCSMIIREG:
6794 if (!capable(CAP_NET_ADMIN))
6795 return -EPERM;
6796
6797 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6798 return -EOPNOTSUPP;
6799
6800 if (!netif_running(dev))
6801 return -EAGAIN;
6802
6803 spin_lock_bh(&bp->phy_lock);
6804 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
6805 spin_unlock_bh(&bp->phy_lock);
6806
6807 return err;
6808
6809 default:
6810 /* do nothing */
6811 break;
6812 }
6813 return -EOPNOTSUPP;
6814 }
6815
6816 /* Called with rtnl_lock */
6817 static int
6818 bnx2_change_mac_addr(struct net_device *dev, void *p)
6819 {
6820 struct sockaddr *addr = p;
6821 struct bnx2 *bp = netdev_priv(dev);
6822
6823 if (!is_valid_ether_addr(addr->sa_data))
6824 return -EINVAL;
6825
6826 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6827 if (netif_running(dev))
6828 bnx2_set_mac_addr(bp);
6829
6830 return 0;
6831 }
6832
6833 /* Called with rtnl_lock */
6834 static int
6835 bnx2_change_mtu(struct net_device *dev, int new_mtu)
6836 {
6837 struct bnx2 *bp = netdev_priv(dev);
6838
6839 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6840 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6841 return -EINVAL;
6842
6843 dev->mtu = new_mtu;
6844 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
6845 }
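/* Triggered by e.g. "ip link set ethX mtu 9000". The valid range is
 * derived from MIN_ETHERNET_PACKET_SIZE and
 * MAX_ETHERNET_JUMBO_PACKET_SIZE less ETH_HLEN; a running interface is
 * torn down and rebuilt so the rx rings match the new frame size. */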
6846
6847 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6848 static void
6849 poll_bnx2(struct net_device *dev)
6850 {
6851 struct bnx2 *bp = netdev_priv(dev);
6852
6853 disable_irq(bp->pdev->irq);
6854 bnx2_interrupt(bp->pdev->irq, dev);
6855 enable_irq(bp->pdev->irq);
6856 }
6857 #endif
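/* poll_bnx2() backs dev->poll_controller and lets netpoll clients such
 * as netconsole run the interrupt handler with the device IRQ
 * disabled. */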
6858
6859 static void __devinit
6860 bnx2_get_5709_media(struct bnx2 *bp)
6861 {
6862 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6863 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6864 u32 strap;
6865
6866 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6867 return;
6868 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6869 bp->phy_flags |= PHY_SERDES_FLAG;
6870 return;
6871 }
6872
6873 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6874 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6875 else
6876 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6877
6878 if (PCI_FUNC(bp->pdev->devfn) == 0) {
6879 switch (strap) {
6880 case 0x4:
6881 case 0x5:
6882 case 0x6:
6883 bp->phy_flags |= PHY_SERDES_FLAG;
6884 return;
6885 }
6886 } else {
6887 switch (strap) {
6888 case 0x1:
6889 case 0x2:
6890 case 0x4:
6891 bp->phy_flags |= PHY_SERDES_FLAG;
6892 return;
6893 }
6894 }
6895 }
6896
6897 static void __devinit
6898 bnx2_get_pci_speed(struct bnx2 *bp)
6899 {
6900 u32 reg;
6901
6902 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6903 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6904 u32 clkreg;
6905
6906 bp->flags |= PCIX_FLAG;
6907
6908 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
6909
6910 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6911 switch (clkreg) {
6912 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6913 bp->bus_speed_mhz = 133;
6914 break;
6915
6916 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6917 bp->bus_speed_mhz = 100;
6918 break;
6919
6920 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6921 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6922 bp->bus_speed_mhz = 66;
6923 break;
6924
6925 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6926 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6927 bp->bus_speed_mhz = 50;
6928 break;
6929
6930 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6931 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6932 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6933 bp->bus_speed_mhz = 33;
6934 break;
6935 }
6936 }
6937 else {
6938 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6939 bp->bus_speed_mhz = 66;
6940 else
6941 bp->bus_speed_mhz = 33;
6942 }
6943
6944 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
6945 bp->flags |= PCI_32BIT_FLAG;
6946
6947 }
6948
6949 static int __devinit
6950 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6951 {
6952 struct bnx2 *bp;
6953 unsigned long mem_len;
6954 int rc, i, j;
6955 u32 reg;
6956 u64 dma_mask, persist_dma_mask;
6957
6958 SET_NETDEV_DEV(dev, &pdev->dev);
6959 bp = netdev_priv(dev);
6960
6961 bp->flags = 0;
6962 bp->phy_flags = 0;
6963
6964 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6965 rc = pci_enable_device(pdev);
6966 if (rc) {
6967 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
6968 goto err_out;
6969 }
6970
6971 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
6972 dev_err(&pdev->dev,
6973 "Cannot find PCI device base address, aborting.\n");
6974 rc = -ENODEV;
6975 goto err_out_disable;
6976 }
6977
6978 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6979 if (rc) {
6980 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
6981 goto err_out_disable;
6982 }
6983
6984 pci_set_master(pdev);
6985
6986 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6987 if (bp->pm_cap == 0) {
6988 dev_err(&pdev->dev,
6989 "Cannot find power management capability, aborting.\n");
6990 rc = -EIO;
6991 goto err_out_release;
6992 }
6993
6994 bp->dev = dev;
6995 bp->pdev = pdev;
6996
6997 spin_lock_init(&bp->phy_lock);
6998 spin_lock_init(&bp->indirect_lock);
6999 INIT_WORK(&bp->reset_task, bnx2_reset_task);
7000
7001 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7002 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
7003 dev->mem_end = dev->mem_start + mem_len;
7004 dev->irq = pdev->irq;
7005
7006 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7007
7008 if (!bp->regview) {
7009 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7010 rc = -ENOMEM;
7011 goto err_out_release;
7012 }
7013
7014 /* Configure byte swap and enable write to the reg_window registers.
7015 	 * Rely on the CPU to do target byte swapping on big-endian systems;
7016 	 * the chip's target access swapping will not swap all accesses.
7017 */
7018 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7019 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7020 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7021
7022 bnx2_set_power_state(bp, PCI_D0);
7023
7024 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7025
7026 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7027 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7028 dev_err(&pdev->dev,
7029 "Cannot find PCIE capability, aborting.\n");
7030 rc = -EIO;
7031 goto err_out_unmap;
7032 }
7033 bp->flags |= PCIE_FLAG;
7034 } else {
7035 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7036 if (bp->pcix_cap == 0) {
7037 dev_err(&pdev->dev,
7038 "Cannot find PCIX capability, aborting.\n");
7039 rc = -EIO;
7040 goto err_out_unmap;
7041 }
7042 }
7043
7044 if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7045 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7046 bp->flags |= MSIX_CAP_FLAG;
7047 }
7048
7049 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7050 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7051 bp->flags |= MSI_CAP_FLAG;
7052 }
7053
7054 /* 5708 cannot support DMA addresses > 40-bit. */
7055 if (CHIP_NUM(bp) == CHIP_NUM_5708)
7056 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
7057 else
7058 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
7059
7060 /* Configure DMA attributes. */
7061 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7062 dev->features |= NETIF_F_HIGHDMA;
7063 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7064 if (rc) {
7065 dev_err(&pdev->dev,
7066 "pci_set_consistent_dma_mask failed, aborting.\n");
7067 goto err_out_unmap;
7068 }
7069 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
7070 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7071 goto err_out_unmap;
7072 }
7073
7074 if (!(bp->flags & PCIE_FLAG))
7075 bnx2_get_pci_speed(bp);
7076
7077 /* 5706A0 may falsely detect SERR and PERR. */
7078 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7079 reg = REG_RD(bp, PCI_COMMAND);
7080 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7081 REG_WR(bp, PCI_COMMAND, reg);
7082 }
7083 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7084 !(bp->flags & PCIX_FLAG)) {
7085
7086 dev_err(&pdev->dev,
7087 "5706 A1 can only be used in a PCIX bus, aborting.\n");
		rc = -EPERM;
7088 		goto err_out_unmap;
7089 }
7090
7091 bnx2_init_nvram(bp);
7092
7093 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
7094
7095 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7096 BNX2_SHM_HDR_SIGNATURE_SIG) {
7097 u32 off = PCI_FUNC(pdev->devfn) << 2;
7098
7099 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
7100 } else
7101 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7102
7103 /* Get the permanent MAC address. First we need to make sure the
7104 * firmware is actually running.
7105 */
7106 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
7107
7108 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7109 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7110 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7111 rc = -ENODEV;
7112 goto err_out_unmap;
7113 }
7114
7115 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
7116 for (i = 0, j = 0; i < 3; i++) {
7117 u8 num, k, skip0;
7118
7119 num = (u8) (reg >> (24 - (i * 8)));
7120 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7121 if (num >= k || !skip0 || k == 1) {
7122 bp->fw_version[j++] = (num / k) + '0';
7123 skip0 = 0;
7124 }
7125 }
7126 if (i != 2)
7127 bp->fw_version[j++] = '.';
7128 }
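	/* Each of the top three bytes of BNX2_DEV_INFO_BC_REV is rendered
	 * in decimal without leading zeros and joined with dots, so a
	 * register value of e.g. 0x01020300 yields the bootcode version
	 * string "1.2.3". */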
7129 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE);
7130 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7131 bp->wol = 1;
7132
7133 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7134 bp->flags |= ASF_ENABLE_FLAG;
7135
7136 for (i = 0; i < 30; i++) {
7137 reg = REG_RD_IND(bp, bp->shmem_base +
7138 BNX2_BC_STATE_CONDITION);
7139 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7140 break;
7141 msleep(10);
7142 }
7143 }
7144 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
7145 reg &= BNX2_CONDITION_MFW_RUN_MASK;
7146 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7147 reg != BNX2_CONDITION_MFW_RUN_NONE) {
7148 int i;
7149 u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR);
7150
7151 bp->fw_version[j++] = ' ';
7152 for (i = 0; i < 3; i++) {
7153 reg = REG_RD_IND(bp, addr + i * 4);
7154 reg = swab32(reg);
7155 memcpy(&bp->fw_version[j], &reg, 4);
7156 j += 4;
7157 }
7158 }
7159
7160 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
7161 bp->mac_addr[0] = (u8) (reg >> 8);
7162 bp->mac_addr[1] = (u8) reg;
7163
7164 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
7165 bp->mac_addr[2] = (u8) (reg >> 24);
7166 bp->mac_addr[3] = (u8) (reg >> 16);
7167 bp->mac_addr[4] = (u8) (reg >> 8);
7168 bp->mac_addr[5] = (u8) reg;
7169
7170 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
7171
7172 bp->tx_ring_size = MAX_TX_DESC_CNT;
7173 bnx2_set_rx_ring_size(bp, 255);
7174
7175 bp->rx_csum = 1;
7176
7177 bp->tx_quick_cons_trip_int = 20;
7178 bp->tx_quick_cons_trip = 20;
7179 bp->tx_ticks_int = 80;
7180 bp->tx_ticks = 80;
7181
7182 bp->rx_quick_cons_trip_int = 6;
7183 bp->rx_quick_cons_trip = 6;
7184 bp->rx_ticks_int = 18;
7185 bp->rx_ticks = 18;
7186
7187 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7188
7189 bp->timer_interval = HZ;
7190 bp->current_interval = HZ;
7191
7192 bp->phy_addr = 1;
7193
7194 /* Disable WOL support if we are running on a SERDES chip. */
7195 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7196 bnx2_get_5709_media(bp);
7197 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
7198 bp->phy_flags |= PHY_SERDES_FLAG;
7199
7200 bp->phy_port = PORT_TP;
7201 if (bp->phy_flags & PHY_SERDES_FLAG) {
7202 bp->phy_port = PORT_FIBRE;
7203 reg = REG_RD_IND(bp, bp->shmem_base +
7204 BNX2_SHARED_HW_CFG_CONFIG);
7205 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
7206 bp->flags |= NO_WOL_FLAG;
7207 bp->wol = 0;
7208 }
7209 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
7210 bp->phy_addr = 2;
7211 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7212 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
7213 }
7214 bnx2_init_remote_phy(bp);
7215
7216 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7217 CHIP_NUM(bp) == CHIP_NUM_5708)
7218 bp->phy_flags |= PHY_CRC_FIX_FLAG;
7219 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7220 (CHIP_REV(bp) == CHIP_REV_Ax ||
7221 CHIP_REV(bp) == CHIP_REV_Bx))
7222 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
7223
7224 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7225 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
7226 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
7227 bp->flags |= NO_WOL_FLAG;
7228 bp->wol = 0;
7229 }
7230
7231 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7232 bp->tx_quick_cons_trip_int =
7233 bp->tx_quick_cons_trip;
7234 bp->tx_ticks_int = bp->tx_ticks;
7235 bp->rx_quick_cons_trip_int =
7236 bp->rx_quick_cons_trip;
7237 bp->rx_ticks_int = bp->rx_ticks;
7238 bp->comp_prod_trip_int = bp->comp_prod_trip;
7239 bp->com_ticks_int = bp->com_ticks;
7240 bp->cmd_ticks_int = bp->cmd_ticks;
7241 }
7242
7243 /* Disable MSI on 5706 if AMD 8132 bridge is found.
7244 *
7245 	 * MSI is defined to be a 32-bit write. The 5706 does 64-bit MSI
7246 	 * writes with byte enables disabled on the unused 32-bit word.
7247 	 * This is legal but causes problems on the AMD 8132, which will
7248 	 * eventually stop responding.
7249 *
7250 * AMD believes this incompatibility is unique to the 5706, and
7251 * prefers to locally disable MSI rather than globally disabling it.
7252 */
7253 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7254 struct pci_dev *amd_8132 = NULL;
7255
7256 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7257 PCI_DEVICE_ID_AMD_8132_BRIDGE,
7258 amd_8132))) {
7259
7260 if (amd_8132->revision >= 0x10 &&
7261 amd_8132->revision <= 0x13) {
7262 disable_msi = 1;
7263 pci_dev_put(amd_8132);
7264 break;
7265 }
7266 }
7267 }
7268
7269 bnx2_set_default_link(bp);
7270 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7271
7272 init_timer(&bp->timer);
7273 bp->timer.expires = RUN_AT(bp->timer_interval);
7274 bp->timer.data = (unsigned long) bp;
7275 bp->timer.function = bnx2_timer;
7276
7277 return 0;
7278
7279 err_out_unmap:
7280 if (bp->regview) {
7281 iounmap(bp->regview);
7282 bp->regview = NULL;
7283 }
7284
7285 err_out_release:
7286 pci_release_regions(pdev);
7287
7288 err_out_disable:
7289 pci_disable_device(pdev);
7290 pci_set_drvdata(pdev, NULL);
7291
7292 err_out:
7293 return rc;
7294 }
7295
7296 static char * __devinit
7297 bnx2_bus_string(struct bnx2 *bp, char *str)
7298 {
7299 char *s = str;
7300
7301 if (bp->flags & PCIE_FLAG) {
7302 s += sprintf(s, "PCI Express");
7303 } else {
7304 s += sprintf(s, "PCI");
7305 if (bp->flags & PCIX_FLAG)
7306 s += sprintf(s, "-X");
7307 if (bp->flags & PCI_32BIT_FLAG)
7308 s += sprintf(s, " 32-bit");
7309 else
7310 s += sprintf(s, " 64-bit");
7311 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7312 }
7313 return str;
7314 }
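/* Produces strings such as "PCI Express" or "PCI-X 64-bit 133MHz" for
 * the probe banner; callers must supply a buffer of at least 40 bytes
 * (see str[40] in bnx2_init_one()). */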
7315
7316 static void __devinit
7317 bnx2_init_napi(struct bnx2 *bp)
7318 {
7319 int i;
7320 struct bnx2_napi *bnapi;
7321
7322 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
7323 bnapi = &bp->bnx2_napi[i];
7324 bnapi->bp = bp;
7325 }
7326 netif_napi_add(bp->dev, &bp->bnx2_napi[0].napi, bnx2_poll, 64);
7327 netif_napi_add(bp->dev, &bp->bnx2_napi[BNX2_TX_VEC].napi, bnx2_tx_poll,
7328 64);
7329 }
7330
7331 static int __devinit
7332 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7333 {
7334 static int version_printed = 0;
7335 struct net_device *dev = NULL;
7336 struct bnx2 *bp;
7337 int rc;
7338 char str[40];
7339 DECLARE_MAC_BUF(mac);
7340
7341 if (version_printed++ == 0)
7342 printk(KERN_INFO "%s", version);
7343
7344 	/* dev zeroed in alloc_etherdev */
7345 dev = alloc_etherdev(sizeof(*bp));
7346
7347 if (!dev)
7348 return -ENOMEM;
7349
7350 rc = bnx2_init_board(pdev, dev);
7351 if (rc < 0) {
7352 free_netdev(dev);
7353 return rc;
7354 }
7355
7356 dev->open = bnx2_open;
7357 dev->hard_start_xmit = bnx2_start_xmit;
7358 dev->stop = bnx2_close;
7359 dev->get_stats = bnx2_get_stats;
7360 dev->set_multicast_list = bnx2_set_rx_mode;
7361 dev->do_ioctl = bnx2_ioctl;
7362 dev->set_mac_address = bnx2_change_mac_addr;
7363 dev->change_mtu = bnx2_change_mtu;
7364 dev->tx_timeout = bnx2_tx_timeout;
7365 dev->watchdog_timeo = TX_TIMEOUT;
7366 #ifdef BCM_VLAN
7367 dev->vlan_rx_register = bnx2_vlan_rx_register;
7368 #endif
7369 dev->ethtool_ops = &bnx2_ethtool_ops;
7370
7371 bp = netdev_priv(dev);
7372 bnx2_init_napi(bp);
7373
7374 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7375 dev->poll_controller = poll_bnx2;
7376 #endif
7377
7378 pci_set_drvdata(pdev, dev);
7379
7380 memcpy(dev->dev_addr, bp->mac_addr, 6);
7381 memcpy(dev->perm_addr, bp->mac_addr, 6);
7382 bp->name = board_info[ent->driver_data].name;
7383
7384 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
7385 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7386 dev->features |= NETIF_F_IPV6_CSUM;
7387
7388 #ifdef BCM_VLAN
7389 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7390 #endif
7391 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7392 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7393 dev->features |= NETIF_F_TSO6;
7394
7395 if ((rc = register_netdev(dev))) {
7396 dev_err(&pdev->dev, "Cannot register net device\n");
7397 if (bp->regview)
7398 iounmap(bp->regview);
7399 pci_release_regions(pdev);
7400 pci_disable_device(pdev);
7401 pci_set_drvdata(pdev, NULL);
7402 free_netdev(dev);
7403 return rc;
7404 }
7405
7406 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
7407 "IRQ %d, node addr %s\n",
7408 dev->name,
7409 bp->name,
7410 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
7411 ((CHIP_ID(bp) & 0x0ff0) >> 4),
7412 bnx2_bus_string(bp, str),
7413 dev->base_addr,
7414 bp->pdev->irq, print_mac(mac, dev->dev_addr));
7415
7416 return 0;
7417 }
7418
7419 static void __devexit
7420 bnx2_remove_one(struct pci_dev *pdev)
7421 {
7422 struct net_device *dev = pci_get_drvdata(pdev);
7423 struct bnx2 *bp = netdev_priv(dev);
7424
7425 flush_scheduled_work();
7426
7427 unregister_netdev(dev);
7428
7429 if (bp->regview)
7430 iounmap(bp->regview);
7431
7432 free_netdev(dev);
7433 pci_release_regions(pdev);
7434 pci_disable_device(pdev);
7435 pci_set_drvdata(pdev, NULL);
7436 }
7437
7438 static int
7439 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
7440 {
7441 struct net_device *dev = pci_get_drvdata(pdev);
7442 struct bnx2 *bp = netdev_priv(dev);
7443 u32 reset_code;
7444
7445 /* PCI register 4 needs to be saved whether netif_running() or not.
7446 * MSI address and data need to be saved if using MSI and
7447 * netif_running().
7448 */
7449 pci_save_state(pdev);
7450 if (!netif_running(dev))
7451 return 0;
7452
7453 flush_scheduled_work();
7454 bnx2_netif_stop(bp);
7455 netif_device_detach(dev);
7456 del_timer_sync(&bp->timer);
7457 if (bp->flags & NO_WOL_FLAG)
7458 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
7459 else if (bp->wol)
7460 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
7461 else
7462 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
7463 bnx2_reset_chip(bp, reset_code);
7464 bnx2_free_skbs(bp);
7465 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
7466 return 0;
7467 }
7468
7469 static int
7470 bnx2_resume(struct pci_dev *pdev)
7471 {
7472 struct net_device *dev = pci_get_drvdata(pdev);
7473 struct bnx2 *bp = netdev_priv(dev);
7474
7475 pci_restore_state(pdev);
7476 if (!netif_running(dev))
7477 return 0;
7478
7479 bnx2_set_power_state(bp, PCI_D0);
7480 netif_device_attach(dev);
7481 bnx2_init_nic(bp);
7482 bnx2_netif_start(bp);
7483 return 0;
7484 }
7485
7486 static struct pci_driver bnx2_pci_driver = {
7487 .name = DRV_MODULE_NAME,
7488 .id_table = bnx2_pci_tbl,
7489 .probe = bnx2_init_one,
7490 .remove = __devexit_p(bnx2_remove_one),
7491 .suspend = bnx2_suspend,
7492 .resume = bnx2_resume,
7493 };
7494
7495 static int __init bnx2_init(void)
7496 {
7497 return pci_register_driver(&bnx2_pci_driver);
7498 }
7499
7500 static void __exit bnx2_cleanup(void)
7501 {
7502 pci_unregister_driver(&bnx2_pci_driver);
7503 }
7504
7505 module_init(bnx2_init);
7506 module_exit(bnx2_cleanup);
7507