]> git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/blame - drivers/net/bnx2.c
bnx2: allocate with GFP_KERNEL flag on RX path init
[mirror_ubuntu-eoan-kernel.git] / drivers / net / bnx2.c
CommitLineData
b6016b76
MC
1/* bnx2.c: Broadcom NX2 network driver.
2 *
bec92044 3 * Copyright (c) 2004-2010 Broadcom Corporation
b6016b76
MC
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
3a9c6a49 12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
f2a4f052
MC
13
14#include <linux/module.h>
15#include <linux/moduleparam.h>
16
17#include <linux/kernel.h>
18#include <linux/timer.h>
19#include <linux/errno.h>
20#include <linux/ioport.h>
21#include <linux/slab.h>
22#include <linux/vmalloc.h>
23#include <linux/interrupt.h>
24#include <linux/pci.h>
25#include <linux/init.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/skbuff.h>
29#include <linux/dma-mapping.h>
1977f032 30#include <linux/bitops.h>
f2a4f052
MC
31#include <asm/io.h>
32#include <asm/irq.h>
33#include <linux/delay.h>
34#include <asm/byteorder.h>
c86a31f4 35#include <asm/page.h>
f2a4f052
MC
36#include <linux/time.h>
37#include <linux/ethtool.h>
38#include <linux/mii.h>
f2a4f052 39#include <linux/if_vlan.h>
08013fa3 40#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
f2a4f052
MC
41#define BCM_VLAN 1
42#endif
f2a4f052 43#include <net/ip.h>
de081fa5 44#include <net/tcp.h>
f2a4f052 45#include <net/checksum.h>
f2a4f052
MC
46#include <linux/workqueue.h>
47#include <linux/crc32.h>
48#include <linux/prefetch.h>
29b12174 49#include <linux/cache.h>
57579f76 50#include <linux/firmware.h>
706bf240 51#include <linux/log2.h>
f2a4f052 52
4edd473f
MC
53#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
54#define BCM_CNIC 1
55#include "cnic_if.h"
56#endif
b6016b76
MC
57#include "bnx2.h"
58#include "bnx2_fw.h"
b3448b0b 59
b6016b76 60#define DRV_MODULE_NAME "bnx2"
e5a0c1fd
MC
61#define DRV_MODULE_VERSION "2.0.16"
62#define DRV_MODULE_RELDATE "July 2, 2010"
bec92044 63#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j6.fw"
078b0735 64#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
a931d294 65#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j15.fw"
bec92044
MC
66#define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw"
67#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-5.0.0.j10.fw"
b6016b76
MC
68
69#define RUN_AT(x) (jiffies + (x))
70
71/* Time in jiffies before concluding the transmitter is hung. */
72#define TX_TIMEOUT (5*HZ)
73
fefa8645 74static char version[] __devinitdata =
b6016b76
MC
75 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
76
77MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
453a9c6e 78MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
b6016b76
MC
79MODULE_LICENSE("GPL");
80MODULE_VERSION(DRV_MODULE_VERSION);
57579f76
MC
81MODULE_FIRMWARE(FW_MIPS_FILE_06);
82MODULE_FIRMWARE(FW_RV2P_FILE_06);
83MODULE_FIRMWARE(FW_MIPS_FILE_09);
84MODULE_FIRMWARE(FW_RV2P_FILE_09);
078b0735 85MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
b6016b76
MC
86
87static int disable_msi = 0;
88
89module_param(disable_msi, int, 0);
90MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
91
92typedef enum {
93 BCM5706 = 0,
94 NC370T,
95 NC370I,
96 BCM5706S,
97 NC370F,
5b0c76ad
MC
98 BCM5708,
99 BCM5708S,
bac0dff6 100 BCM5709,
27a005b8 101 BCM5709S,
7bb0a04f 102 BCM5716,
1caacecb 103 BCM5716S,
b6016b76
MC
104} board_t;
105
106/* indexed by board_t, above */
fefa8645 107static struct {
b6016b76
MC
108 char *name;
109} board_info[] __devinitdata = {
110 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
111 { "HP NC370T Multifunction Gigabit Server Adapter" },
112 { "HP NC370i Multifunction Gigabit Server Adapter" },
113 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
114 { "HP NC370F Multifunction Gigabit Server Adapter" },
5b0c76ad
MC
115 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
116 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
bac0dff6 117 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
27a005b8 118 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
7bb0a04f 119 { "Broadcom NetXtreme II BCM5716 1000Base-T" },
1caacecb 120 { "Broadcom NetXtreme II BCM5716 1000Base-SX" },
b6016b76
MC
121 };
122
7bb0a04f 123static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
b6016b76
MC
124 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
125 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
126 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
127 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
128 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
129 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
5b0c76ad
MC
130 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
131 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
b6016b76
MC
132 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
133 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
134 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
135 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
5b0c76ad
MC
136 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
137 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
bac0dff6
MC
138 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
139 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
27a005b8
MC
140 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
141 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
7bb0a04f
MC
142 { PCI_VENDOR_ID_BROADCOM, 0x163b,
143 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
1caacecb 144 { PCI_VENDOR_ID_BROADCOM, 0x163c,
1f2435e5 145 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
b6016b76
MC
146 { 0, }
147};
148
0ced9d01 149static const struct flash_spec flash_table[] =
b6016b76 150{
e30372c9
MC
151#define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
152#define NONBUFFERED_FLAGS (BNX2_NV_WREN)
b6016b76 153 /* Slow EEPROM */
37137709 154 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
e30372c9 155 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
b6016b76
MC
156 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
157 "EEPROM - slow"},
37137709
MC
158 /* Expansion entry 0001 */
159 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 160 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
161 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
162 "Entry 0001"},
b6016b76
MC
163 /* Saifun SA25F010 (non-buffered flash) */
164 /* strap, cfg1, & write1 need updates */
37137709 165 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 166 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
b6016b76
MC
167 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
168 "Non-buffered flash (128kB)"},
169 /* Saifun SA25F020 (non-buffered flash) */
170 /* strap, cfg1, & write1 need updates */
37137709 171 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 172 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
b6016b76
MC
173 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
174 "Non-buffered flash (256kB)"},
37137709
MC
175 /* Expansion entry 0100 */
176 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 177 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
178 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
179 "Entry 0100"},
180 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
6aa20a22 181 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
e30372c9 182 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
37137709
MC
183 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
184 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
185 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
186 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
e30372c9 187 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
37137709
MC
188 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
189 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
190 /* Saifun SA25F005 (non-buffered flash) */
191 /* strap, cfg1, & write1 need updates */
192 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 193 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
194 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
195 "Non-buffered flash (64kB)"},
196 /* Fast EEPROM */
197 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
e30372c9 198 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
37137709
MC
199 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
200 "EEPROM - fast"},
201 /* Expansion entry 1001 */
202 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 203 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
204 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
205 "Entry 1001"},
206 /* Expansion entry 1010 */
207 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 208 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
209 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
210 "Entry 1010"},
211 /* ATMEL AT45DB011B (buffered flash) */
212 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
e30372c9 213 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
37137709
MC
214 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
215 "Buffered flash (128kB)"},
216 /* Expansion entry 1100 */
217 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 218 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
219 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
220 "Entry 1100"},
221 /* Expansion entry 1101 */
222 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 223 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
224 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
225 "Entry 1101"},
226 /* Ateml Expansion entry 1110 */
227 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
e30372c9 228 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
37137709
MC
229 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
230 "Entry 1110 (Atmel)"},
231 /* ATMEL AT45DB021B (buffered flash) */
232 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
e30372c9 233 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
37137709
MC
234 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
235 "Buffered flash (256kB)"},
b6016b76
MC
236};
237
0ced9d01 238static const struct flash_spec flash_5709 = {
e30372c9
MC
239 .flags = BNX2_NV_BUFFERED,
240 .page_bits = BCM5709_FLASH_PAGE_BITS,
241 .page_size = BCM5709_FLASH_PAGE_SIZE,
242 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
243 .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
244 .name = "5709 Buffered flash (256kB)",
245};
246
b6016b76
MC
247MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
248
4327ba43 249static void bnx2_init_napi(struct bnx2 *bp);
f048fa9c 250static void bnx2_del_napi(struct bnx2 *bp);
4327ba43 251
35e9010b 252static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
e89bbf10 253{
2f8af120 254 u32 diff;
e89bbf10 255
2f8af120 256 smp_mb();
faac9c4b
MC
257
258 /* The ring uses 256 indices for 255 entries, one of them
259 * needs to be skipped.
260 */
35e9010b 261 diff = txr->tx_prod - txr->tx_cons;
faac9c4b
MC
262 if (unlikely(diff >= TX_DESC_CNT)) {
263 diff &= 0xffff;
264 if (diff == TX_DESC_CNT)
265 diff = MAX_TX_DESC_CNT;
266 }
e89bbf10
MC
267 return (bp->tx_ring_size - diff);
268}
269
b6016b76
MC
270static u32
271bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
272{
1b8227c4
MC
273 u32 val;
274
275 spin_lock_bh(&bp->indirect_lock);
b6016b76 276 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
1b8227c4
MC
277 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
278 spin_unlock_bh(&bp->indirect_lock);
279 return val;
b6016b76
MC
280}
281
282static void
283bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
284{
1b8227c4 285 spin_lock_bh(&bp->indirect_lock);
b6016b76
MC
286 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
287 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
1b8227c4 288 spin_unlock_bh(&bp->indirect_lock);
b6016b76
MC
289}
290
2726d6e1
MC
291static void
292bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
293{
294 bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
295}
296
297static u32
298bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
299{
300 return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
301}
302
b6016b76
MC
303static void
304bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
305{
306 offset += cid_addr;
1b8227c4 307 spin_lock_bh(&bp->indirect_lock);
59b47d8a
MC
308 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
309 int i;
310
311 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
312 REG_WR(bp, BNX2_CTX_CTX_CTRL,
313 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
314 for (i = 0; i < 5; i++) {
59b47d8a
MC
315 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
316 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
317 break;
318 udelay(5);
319 }
320 } else {
321 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
322 REG_WR(bp, BNX2_CTX_DATA, val);
323 }
1b8227c4 324 spin_unlock_bh(&bp->indirect_lock);
b6016b76
MC
325}
326
4edd473f
MC
327#ifdef BCM_CNIC
328static int
329bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
330{
331 struct bnx2 *bp = netdev_priv(dev);
332 struct drv_ctl_io *io = &info->data.io;
333
334 switch (info->cmd) {
335 case DRV_CTL_IO_WR_CMD:
336 bnx2_reg_wr_ind(bp, io->offset, io->data);
337 break;
338 case DRV_CTL_IO_RD_CMD:
339 io->data = bnx2_reg_rd_ind(bp, io->offset);
340 break;
341 case DRV_CTL_CTX_WR_CMD:
342 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
343 break;
344 default:
345 return -EINVAL;
346 }
347 return 0;
348}
349
350static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
351{
352 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
353 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
354 int sb_id;
355
356 if (bp->flags & BNX2_FLAG_USING_MSIX) {
357 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
358 bnapi->cnic_present = 0;
359 sb_id = bp->irq_nvecs;
360 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
361 } else {
362 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
363 bnapi->cnic_tag = bnapi->last_status_idx;
364 bnapi->cnic_present = 1;
365 sb_id = 0;
366 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
367 }
368
369 cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
370 cp->irq_arr[0].status_blk = (void *)
371 ((unsigned long) bnapi->status_blk.msi +
372 (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
373 cp->irq_arr[0].status_blk_num = sb_id;
374 cp->num_irq = 1;
375}
376
377static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
378 void *data)
379{
380 struct bnx2 *bp = netdev_priv(dev);
381 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
382
383 if (ops == NULL)
384 return -EINVAL;
385
386 if (cp->drv_state & CNIC_DRV_STATE_REGD)
387 return -EBUSY;
388
389 bp->cnic_data = data;
390 rcu_assign_pointer(bp->cnic_ops, ops);
391
392 cp->num_irq = 0;
393 cp->drv_state = CNIC_DRV_STATE_REGD;
394
395 bnx2_setup_cnic_irq_info(bp);
396
397 return 0;
398}
399
400static int bnx2_unregister_cnic(struct net_device *dev)
401{
402 struct bnx2 *bp = netdev_priv(dev);
403 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
404 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
405
c5a88950 406 mutex_lock(&bp->cnic_lock);
4edd473f
MC
407 cp->drv_state = 0;
408 bnapi->cnic_present = 0;
409 rcu_assign_pointer(bp->cnic_ops, NULL);
c5a88950 410 mutex_unlock(&bp->cnic_lock);
4edd473f
MC
411 synchronize_rcu();
412 return 0;
413}
414
415struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
416{
417 struct bnx2 *bp = netdev_priv(dev);
418 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
419
420 cp->drv_owner = THIS_MODULE;
421 cp->chip_id = bp->chip_id;
422 cp->pdev = bp->pdev;
423 cp->io_base = bp->regview;
424 cp->drv_ctl = bnx2_drv_ctl;
425 cp->drv_register_cnic = bnx2_register_cnic;
426 cp->drv_unregister_cnic = bnx2_unregister_cnic;
427
428 return cp;
429}
430EXPORT_SYMBOL(bnx2_cnic_probe);
431
432static void
433bnx2_cnic_stop(struct bnx2 *bp)
434{
435 struct cnic_ops *c_ops;
436 struct cnic_ctl_info info;
437
c5a88950
MC
438 mutex_lock(&bp->cnic_lock);
439 c_ops = bp->cnic_ops;
4edd473f
MC
440 if (c_ops) {
441 info.cmd = CNIC_CTL_STOP_CMD;
442 c_ops->cnic_ctl(bp->cnic_data, &info);
443 }
c5a88950 444 mutex_unlock(&bp->cnic_lock);
4edd473f
MC
445}
446
447static void
448bnx2_cnic_start(struct bnx2 *bp)
449{
450 struct cnic_ops *c_ops;
451 struct cnic_ctl_info info;
452
c5a88950
MC
453 mutex_lock(&bp->cnic_lock);
454 c_ops = bp->cnic_ops;
4edd473f
MC
455 if (c_ops) {
456 if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
457 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
458
459 bnapi->cnic_tag = bnapi->last_status_idx;
460 }
461 info.cmd = CNIC_CTL_START_CMD;
462 c_ops->cnic_ctl(bp->cnic_data, &info);
463 }
c5a88950 464 mutex_unlock(&bp->cnic_lock);
4edd473f
MC
465}
466
467#else
468
469static void
470bnx2_cnic_stop(struct bnx2 *bp)
471{
472}
473
474static void
475bnx2_cnic_start(struct bnx2 *bp)
476{
477}
478
479#endif
480
b6016b76
MC
481static int
482bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
483{
484 u32 val1;
485 int i, ret;
486
583c28e5 487 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
b6016b76
MC
488 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
489 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
490
491 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
492 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
493
494 udelay(40);
495 }
496
497 val1 = (bp->phy_addr << 21) | (reg << 16) |
498 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
499 BNX2_EMAC_MDIO_COMM_START_BUSY;
500 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
501
502 for (i = 0; i < 50; i++) {
503 udelay(10);
504
505 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
506 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
507 udelay(5);
508
509 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
510 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
511
512 break;
513 }
514 }
515
516 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
517 *val = 0x0;
518 ret = -EBUSY;
519 }
520 else {
521 *val = val1;
522 ret = 0;
523 }
524
583c28e5 525 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
b6016b76
MC
526 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
527 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
528
529 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
530 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
531
532 udelay(40);
533 }
534
535 return ret;
536}
537
538static int
539bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
540{
541 u32 val1;
542 int i, ret;
543
583c28e5 544 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
b6016b76
MC
545 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
546 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
547
548 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
549 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
550
551 udelay(40);
552 }
553
554 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
555 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
556 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
557 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
6aa20a22 558
b6016b76
MC
559 for (i = 0; i < 50; i++) {
560 udelay(10);
561
562 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
563 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
564 udelay(5);
565 break;
566 }
567 }
568
569 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
570 ret = -EBUSY;
571 else
572 ret = 0;
573
583c28e5 574 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
b6016b76
MC
575 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
576 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
577
578 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
579 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
580
581 udelay(40);
582 }
583
584 return ret;
585}
586
587static void
588bnx2_disable_int(struct bnx2 *bp)
589{
b4b36042
MC
590 int i;
591 struct bnx2_napi *bnapi;
592
593 for (i = 0; i < bp->irq_nvecs; i++) {
594 bnapi = &bp->bnx2_napi[i];
595 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
596 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
597 }
b6016b76
MC
598 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
599}
600
601static void
602bnx2_enable_int(struct bnx2 *bp)
603{
b4b36042
MC
604 int i;
605 struct bnx2_napi *bnapi;
35efa7c1 606
b4b36042
MC
607 for (i = 0; i < bp->irq_nvecs; i++) {
608 bnapi = &bp->bnx2_napi[i];
1269a8a6 609
b4b36042
MC
610 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
611 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
612 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
613 bnapi->last_status_idx);
b6016b76 614
b4b36042
MC
615 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
616 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
617 bnapi->last_status_idx);
618 }
bf5295bb 619 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
b6016b76
MC
620}
621
622static void
623bnx2_disable_int_sync(struct bnx2 *bp)
624{
b4b36042
MC
625 int i;
626
b6016b76 627 atomic_inc(&bp->intr_sem);
3767546c
MC
628 if (!netif_running(bp->dev))
629 return;
630
b6016b76 631 bnx2_disable_int(bp);
b4b36042
MC
632 for (i = 0; i < bp->irq_nvecs; i++)
633 synchronize_irq(bp->irq_tbl[i].vector);
b6016b76
MC
634}
635
35efa7c1
MC
636static void
637bnx2_napi_disable(struct bnx2 *bp)
638{
b4b36042
MC
639 int i;
640
641 for (i = 0; i < bp->irq_nvecs; i++)
642 napi_disable(&bp->bnx2_napi[i].napi);
35efa7c1
MC
643}
644
645static void
646bnx2_napi_enable(struct bnx2 *bp)
647{
b4b36042
MC
648 int i;
649
650 for (i = 0; i < bp->irq_nvecs; i++)
651 napi_enable(&bp->bnx2_napi[i].napi);
35efa7c1
MC
652}
653
b6016b76 654static void
212f9934 655bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
b6016b76 656{
212f9934
MC
657 if (stop_cnic)
658 bnx2_cnic_stop(bp);
b6016b76 659 if (netif_running(bp->dev)) {
35efa7c1 660 bnx2_napi_disable(bp);
b6016b76 661 netif_tx_disable(bp->dev);
b6016b76 662 }
b7466560 663 bnx2_disable_int_sync(bp);
a0ba6760 664 netif_carrier_off(bp->dev); /* prevent tx timeout */
b6016b76
MC
665}
666
667static void
212f9934 668bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
b6016b76
MC
669{
670 if (atomic_dec_and_test(&bp->intr_sem)) {
671 if (netif_running(bp->dev)) {
706bf240 672 netif_tx_wake_all_queues(bp->dev);
a0ba6760
MC
673 spin_lock_bh(&bp->phy_lock);
674 if (bp->link_up)
675 netif_carrier_on(bp->dev);
676 spin_unlock_bh(&bp->phy_lock);
35efa7c1 677 bnx2_napi_enable(bp);
b6016b76 678 bnx2_enable_int(bp);
212f9934
MC
679 if (start_cnic)
680 bnx2_cnic_start(bp);
b6016b76
MC
681 }
682 }
683}
684
35e9010b
MC
685static void
686bnx2_free_tx_mem(struct bnx2 *bp)
687{
688 int i;
689
690 for (i = 0; i < bp->num_tx_rings; i++) {
691 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
692 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
693
694 if (txr->tx_desc_ring) {
695 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
696 txr->tx_desc_ring,
697 txr->tx_desc_mapping);
698 txr->tx_desc_ring = NULL;
699 }
700 kfree(txr->tx_buf_ring);
701 txr->tx_buf_ring = NULL;
702 }
703}
704
bb4f98ab
MC
705static void
706bnx2_free_rx_mem(struct bnx2 *bp)
707{
708 int i;
709
710 for (i = 0; i < bp->num_rx_rings; i++) {
711 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
712 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
713 int j;
714
715 for (j = 0; j < bp->rx_max_ring; j++) {
716 if (rxr->rx_desc_ring[j])
717 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
718 rxr->rx_desc_ring[j],
719 rxr->rx_desc_mapping[j]);
720 rxr->rx_desc_ring[j] = NULL;
721 }
25b0b999 722 vfree(rxr->rx_buf_ring);
bb4f98ab
MC
723 rxr->rx_buf_ring = NULL;
724
725 for (j = 0; j < bp->rx_max_pg_ring; j++) {
726 if (rxr->rx_pg_desc_ring[j])
727 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
3298a738
MC
728 rxr->rx_pg_desc_ring[j],
729 rxr->rx_pg_desc_mapping[j]);
730 rxr->rx_pg_desc_ring[j] = NULL;
bb4f98ab 731 }
25b0b999 732 vfree(rxr->rx_pg_ring);
bb4f98ab
MC
733 rxr->rx_pg_ring = NULL;
734 }
735}
736
35e9010b
MC
737static int
738bnx2_alloc_tx_mem(struct bnx2 *bp)
739{
740 int i;
741
742 for (i = 0; i < bp->num_tx_rings; i++) {
743 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
744 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
745
746 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
747 if (txr->tx_buf_ring == NULL)
748 return -ENOMEM;
749
750 txr->tx_desc_ring =
751 pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
752 &txr->tx_desc_mapping);
753 if (txr->tx_desc_ring == NULL)
754 return -ENOMEM;
755 }
756 return 0;
757}
758
bb4f98ab
MC
759static int
760bnx2_alloc_rx_mem(struct bnx2 *bp)
761{
762 int i;
763
764 for (i = 0; i < bp->num_rx_rings; i++) {
765 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
766 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
767 int j;
768
769 rxr->rx_buf_ring =
770 vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
771 if (rxr->rx_buf_ring == NULL)
772 return -ENOMEM;
773
774 memset(rxr->rx_buf_ring, 0,
775 SW_RXBD_RING_SIZE * bp->rx_max_ring);
776
777 for (j = 0; j < bp->rx_max_ring; j++) {
778 rxr->rx_desc_ring[j] =
779 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
780 &rxr->rx_desc_mapping[j]);
781 if (rxr->rx_desc_ring[j] == NULL)
782 return -ENOMEM;
783
784 }
785
786 if (bp->rx_pg_ring_size) {
787 rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
788 bp->rx_max_pg_ring);
789 if (rxr->rx_pg_ring == NULL)
790 return -ENOMEM;
791
792 memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
793 bp->rx_max_pg_ring);
794 }
795
796 for (j = 0; j < bp->rx_max_pg_ring; j++) {
797 rxr->rx_pg_desc_ring[j] =
798 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
799 &rxr->rx_pg_desc_mapping[j]);
800 if (rxr->rx_pg_desc_ring[j] == NULL)
801 return -ENOMEM;
802
803 }
804 }
805 return 0;
806}
807
b6016b76
MC
808static void
809bnx2_free_mem(struct bnx2 *bp)
810{
13daffa2 811 int i;
43e80b89 812 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
13daffa2 813
35e9010b 814 bnx2_free_tx_mem(bp);
bb4f98ab 815 bnx2_free_rx_mem(bp);
35e9010b 816
59b47d8a
MC
817 for (i = 0; i < bp->ctx_pages; i++) {
818 if (bp->ctx_blk[i]) {
819 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
820 bp->ctx_blk[i],
821 bp->ctx_blk_mapping[i]);
822 bp->ctx_blk[i] = NULL;
823 }
824 }
43e80b89 825 if (bnapi->status_blk.msi) {
0f31f994 826 pci_free_consistent(bp->pdev, bp->status_stats_size,
43e80b89
MC
827 bnapi->status_blk.msi,
828 bp->status_blk_mapping);
829 bnapi->status_blk.msi = NULL;
0f31f994 830 bp->stats_blk = NULL;
b6016b76 831 }
b6016b76
MC
832}
833
834static int
835bnx2_alloc_mem(struct bnx2 *bp)
836{
35e9010b 837 int i, status_blk_size, err;
43e80b89
MC
838 struct bnx2_napi *bnapi;
839 void *status_blk;
b6016b76 840
0f31f994
MC
841 /* Combine status and statistics blocks into one allocation. */
842 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
f86e82fb 843 if (bp->flags & BNX2_FLAG_MSIX_CAP)
b4b36042
MC
844 status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
845 BNX2_SBLK_MSIX_ALIGN_SIZE);
0f31f994
MC
846 bp->status_stats_size = status_blk_size +
847 sizeof(struct statistics_block);
848
43e80b89
MC
849 status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
850 &bp->status_blk_mapping);
851 if (status_blk == NULL)
b6016b76
MC
852 goto alloc_mem_err;
853
43e80b89 854 memset(status_blk, 0, bp->status_stats_size);
b6016b76 855
43e80b89
MC
856 bnapi = &bp->bnx2_napi[0];
857 bnapi->status_blk.msi = status_blk;
858 bnapi->hw_tx_cons_ptr =
859 &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
860 bnapi->hw_rx_cons_ptr =
861 &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
f86e82fb 862 if (bp->flags & BNX2_FLAG_MSIX_CAP) {
b4b36042 863 for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
43e80b89
MC
864 struct status_block_msix *sblk;
865
866 bnapi = &bp->bnx2_napi[i];
b4b36042 867
43e80b89
MC
868 sblk = (void *) (status_blk +
869 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
870 bnapi->status_blk.msix = sblk;
871 bnapi->hw_tx_cons_ptr =
872 &sblk->status_tx_quick_consumer_index;
873 bnapi->hw_rx_cons_ptr =
874 &sblk->status_rx_quick_consumer_index;
b4b36042
MC
875 bnapi->int_num = i << 24;
876 }
877 }
35efa7c1 878
43e80b89 879 bp->stats_blk = status_blk + status_blk_size;
b6016b76 880
0f31f994 881 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
b6016b76 882
59b47d8a
MC
883 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
884 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
885 if (bp->ctx_pages == 0)
886 bp->ctx_pages = 1;
887 for (i = 0; i < bp->ctx_pages; i++) {
888 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
889 BCM_PAGE_SIZE,
890 &bp->ctx_blk_mapping[i]);
891 if (bp->ctx_blk[i] == NULL)
892 goto alloc_mem_err;
893 }
894 }
35e9010b 895
bb4f98ab
MC
896 err = bnx2_alloc_rx_mem(bp);
897 if (err)
898 goto alloc_mem_err;
899
35e9010b
MC
900 err = bnx2_alloc_tx_mem(bp);
901 if (err)
902 goto alloc_mem_err;
903
b6016b76
MC
904 return 0;
905
906alloc_mem_err:
907 bnx2_free_mem(bp);
908 return -ENOMEM;
909}
910
e3648b3d
MC
911static void
912bnx2_report_fw_link(struct bnx2 *bp)
913{
914 u32 fw_link_status = 0;
915
583c28e5 916 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
0d8a6571
MC
917 return;
918
e3648b3d
MC
919 if (bp->link_up) {
920 u32 bmsr;
921
922 switch (bp->line_speed) {
923 case SPEED_10:
924 if (bp->duplex == DUPLEX_HALF)
925 fw_link_status = BNX2_LINK_STATUS_10HALF;
926 else
927 fw_link_status = BNX2_LINK_STATUS_10FULL;
928 break;
929 case SPEED_100:
930 if (bp->duplex == DUPLEX_HALF)
931 fw_link_status = BNX2_LINK_STATUS_100HALF;
932 else
933 fw_link_status = BNX2_LINK_STATUS_100FULL;
934 break;
935 case SPEED_1000:
936 if (bp->duplex == DUPLEX_HALF)
937 fw_link_status = BNX2_LINK_STATUS_1000HALF;
938 else
939 fw_link_status = BNX2_LINK_STATUS_1000FULL;
940 break;
941 case SPEED_2500:
942 if (bp->duplex == DUPLEX_HALF)
943 fw_link_status = BNX2_LINK_STATUS_2500HALF;
944 else
945 fw_link_status = BNX2_LINK_STATUS_2500FULL;
946 break;
947 }
948
949 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
950
951 if (bp->autoneg) {
952 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
953
ca58c3af
MC
954 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
955 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
e3648b3d
MC
956
957 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
583c28e5 958 bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
e3648b3d
MC
959 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
960 else
961 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
962 }
963 }
964 else
965 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
966
2726d6e1 967 bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
e3648b3d
MC
968}
969
9b1084b8
MC
970static char *
971bnx2_xceiver_str(struct bnx2 *bp)
972{
973 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
583c28e5 974 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
9b1084b8
MC
975 "Copper"));
976}
977
b6016b76
MC
978static void
979bnx2_report_link(struct bnx2 *bp)
980{
981 if (bp->link_up) {
982 netif_carrier_on(bp->dev);
3a9c6a49
JP
983 netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
984 bnx2_xceiver_str(bp),
985 bp->line_speed,
986 bp->duplex == DUPLEX_FULL ? "full" : "half");
b6016b76
MC
987
988 if (bp->flow_ctrl) {
989 if (bp->flow_ctrl & FLOW_CTRL_RX) {
3a9c6a49 990 pr_cont(", receive ");
b6016b76 991 if (bp->flow_ctrl & FLOW_CTRL_TX)
3a9c6a49 992 pr_cont("& transmit ");
b6016b76
MC
993 }
994 else {
3a9c6a49 995 pr_cont(", transmit ");
b6016b76 996 }
3a9c6a49 997 pr_cont("flow control ON");
b6016b76 998 }
3a9c6a49
JP
999 pr_cont("\n");
1000 } else {
b6016b76 1001 netif_carrier_off(bp->dev);
3a9c6a49
JP
1002 netdev_err(bp->dev, "NIC %s Link is Down\n",
1003 bnx2_xceiver_str(bp));
b6016b76 1004 }
e3648b3d
MC
1005
1006 bnx2_report_fw_link(bp);
b6016b76
MC
1007}
1008
/* Derive bp->flow_ctrl (the pause settings in effect) either from the
 * forced configuration or from the autoneg results, following IEEE
 * 802.3ab-1999 Table 28B-3.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	/* Unless BOTH speed and flow control are autonegotiated, use the
	 * requested (forced) flow control - and only on full duplex.
	 */
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause frames are only meaningful on full-duplex links. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* The 5708 SerDes PHY reports the already-resolved pause result
	 * directly in its status register; no table lookup needed.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* Translate the 1000Base-X pause bits into the common copper
	 * encoding so the resolution logic below serves both media.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
1084
27a005b8
MC
/* Record link speed/duplex from a 5709 SerDes PHY that has linked up.
 * Always returns 0.
 */
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	/* The GP status register lives in a separate register block:
	 * select it, read, then restore the default IEEE block.
	 */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	/* Forced speed: trust the requested settings, not the AN status. */
	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
	case MII_BNX2_GP_TOP_AN_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_1G:
	case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
		bp->line_speed = SPEED_1000;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
		bp->line_speed = SPEED_2500;
		break;
	/* no default: an unrecognized encoding leaves line_speed as-is */
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}
1123
/* Record link speed/duplex from a 5708 SerDes PHY after link-up.
 * Always returns 0.
 */
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
	case BCM5708S_1000X_STAT1_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case BCM5708S_1000X_STAT1_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case BCM5708S_1000X_STAT1_SPEED_1G:
		bp->line_speed = SPEED_1000;
		break;
	case BCM5708S_1000X_STAT1_SPEED_2G5:
		bp->line_speed = SPEED_2500;
		break;
	/* no default: an unknown encoding leaves line_speed as-is */
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
1152
/* Record link parameters from a 5706 SerDes PHY: speed is fixed at
 * 1 Gbps; duplex comes from BMCR, refined by the common autoneg
 * advertisement bits when autoneg is enabled.  Always returns 0.
 */
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	/* Forced mode: the BMCR duplex bit is authoritative. */
	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* Use the highest duplex both sides advertised. */
	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
1189
/* Record the negotiated (or forced) speed/duplex from a copper PHY
 * after link-up.  Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		/* 1000Base-T first: in MII_STAT1000 the link partner's
		 * ability bits sit two positions above the MII_CTRL1000
		 * advertisement bits, so shift right by 2 before ANDing.
		 */
		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* No gigabit match: fall back to 10/100 bits. */
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* Nothing in common: report link down. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Autoneg off: decode the forced BMCR settings. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
1255
/* Program the L2 RX context for connection @cid; on 5709 this includes
 * the flow-control watermarks, scaled into 4-bit fields of the context
 * type word.
 */
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;	/* NOTE(review): undocumented field at bits 8+; confirm meaning against chip spec */

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		/* The low watermark only matters when TX pause is on;
		 * otherwise it is disabled.
		 */
		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = min_t(int, bp->rx_ring_size / 4, lo_water + 16);

		if (hi_water <= lo_water)
			lo_water = 0;

		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		/* Clamp to the 4-bit hardware field; hi_water == 0
		 * effectively disables watermarking, so clear lo too.
		 */
		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1291
bb4f98ab
MC
1292static void
1293bnx2_init_all_rx_contexts(struct bnx2 *bp)
1294{
1295 int i;
1296 u32 cid;
1297
1298 for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1299 if (i == 1)
1300 cid = RX_RSS_CID;
1301 bnx2_init_rx_context(bp, cid);
1302 }
1303}
1304
/* Program the EMAC to match the resolved link parameters (port mode,
 * duplex, RX/TX pause), acknowledge the link-change interrupt, and on
 * 5709 refresh the RX contexts since their watermarks depend on the
 * current flow-control state.
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Default inter-packet gap; use a longer one for 1G half duplex. */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
		case SPEED_10:
			/* 5706 has no dedicated 10M mode; it uses MII. */
			if (CHIP_NUM(bp) != CHIP_NUM_5706) {
				val |= BNX2_EMAC_MODE_PORT_MII_10M;
				break;
			}
			/* fall through */
		case SPEED_100:
			val |= BNX2_EMAC_MODE_PORT_MII;
			break;
		case SPEED_2500:
			val |= BNX2_EMAC_MODE_25G_MODE;
			/* fall through */
		case SPEED_1000:
			val |= BNX2_EMAC_MODE_PORT_GMII;
			break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);
}
1372
27a005b8
MC
1373static void
1374bnx2_enable_bmsr1(struct bnx2 *bp)
1375{
583c28e5 1376 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
27a005b8
MC
1377 (CHIP_NUM(bp) == CHIP_NUM_5709))
1378 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1379 MII_BNX2_BLK_ADDR_GP_STATUS);
1380}
1381
1382static void
1383bnx2_disable_bmsr1(struct bnx2 *bp)
1384{
583c28e5 1385 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
27a005b8
MC
1386 (CHIP_NUM(bp) == CHIP_NUM_5709))
1387 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1388 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1389}
1390
605a9e20
MC
/* Ensure the 2.5G ability bit is set in the PHY "up1" register.
 * Returns 1 if it was already set (nothing changed), 0 if this call had
 * to set it (caller may need to restart the link), and 0 immediately if
 * the PHY is not 2.5G capable.
 */
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	/* On 5709 the up1 register sits in the OVER1G block. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	/* Restore the default register block on 5709. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1419
/* Ensure the 2.5G ability bit is cleared in the PHY "up1" register.
 * Returns 1 if this call had to clear it (caller may need to restart
 * the link), 0 if it was already clear or the PHY is not 2.5G capable.
 */
static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	/* On 5709 the up1 register sits in the OVER1G block. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	/* Restore the default register block on 5709. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1445
/* Force the PHY to 2.5 Gbps using the chip-specific mechanism (5709:
 * SERDES_DIG MISC1 force bits; 5708: a BMCR bit), then update BMCR.
 * Silently returns on other chips, non-2.5G-capable PHYs, or PHY read
 * failure.
 */
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		/* The force-speed controls live in the SERDES_DIG block. */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
			val |= MII_BNX2_SD_MISC1_FORCE |
				MII_BNX2_SD_MISC1_FORCE_2_5G;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	/* Forcing a speed implies autoneg off; honor requested duplex. */
	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1489
/* Undo bnx2_enable_forced_2g5(): clear the chip-specific 2.5G force
 * bits and, if speed autoneg is requested, re-enable/restart autoneg.
 * Silently returns on other chips, non-2.5G-capable PHYs, or PHY read
 * failure.
 */
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		/* The force-speed controls live in the SERDES_DIG block. */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1528
b2fadeae
MC
/* Toggle the 5706 SerDes force-link-down control, reached through an
 * expansion register via the DSP address/data port pair.
 * NOTE(review): the 0xff0f / 0xc0 masks are undocumented here - start=1
 * appears to clear the force bits and start=0 to set them; confirm
 * against callers and the PHY datasheet.
 */
static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}
1541
b6016b76
MC
/* Re-evaluate the PHY link state and reconcile driver state and MAC
 * configuration with it.  Always returns 0.  Called with phy_lock held.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback modes the link is considered unconditionally up. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Remote PHY: firmware owns link management, nothing to do. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* BMSR latches link-down events; read twice to get the current
	 * state (first read clears the latch).
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* 5706 SerDes quirk: qualify BMSR link with the EMAC link status
	 * and the AN debug sync indication.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Read speed/duplex with the chip-specific helper. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: drop any forced 2.5G mode and re-enable
		 * autoneg if it was disabled by parallel detection.
		 */
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	/* Only log on an actual state transition. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1625
/* Soft-reset the PHY via BMCR and busy-wait (up to 100 x 10us) for the
 * reset bit to self-clear.  Returns 0 on success, -EBUSY on timeout.
 */
static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			/* Small settle delay after reset completes. */
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}
1649
1650static u32
1651bnx2_phy_get_pause_adv(struct bnx2 *bp)
1652{
1653 u32 adv = 0;
1654
1655 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1656 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1657
583c28e5 1658 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
b6016b76
MC
1659 adv = ADVERTISE_1000XPAUSE;
1660 }
1661 else {
1662 adv = ADVERTISE_PAUSE_CAP;
1663 }
1664 }
1665 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
583c28e5 1666 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
b6016b76
MC
1667 adv = ADVERTISE_1000XPSE_ASYM;
1668 }
1669 else {
1670 adv = ADVERTISE_PAUSE_ASYM;
1671 }
1672 }
1673 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
583c28e5 1674 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
b6016b76
MC
1675 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1676 }
1677 else {
1678 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1679 }
1680 }
1681 return adv;
1682}
1683
a2f13890 1684static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
0d8a6571 1685
/* Translate the driver's requested link settings into the firmware
 * NETLINK format and issue a SET_LINK command - the firmware owns the
 * remote PHY.  Drops and reacquires phy_lock around the blocking
 * firmware handshake.  Always returns 0.
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autoneg: encode every advertised speed. */
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		/* Forced mode: encode exactly one speed/duplex. */
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	/* The firmware sync sleeps, so release the PHY spinlock. */
	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
1744
/* Configure a SerDes PHY per the driver's requested settings: delegate
 * to firmware for remote PHYs, force speed/duplex when autoneg is off
 * (bouncing the link so the partner notices), otherwise reprogram the
 * advertisement and restart autoneg.  Called with phy_lock held; drops
 * and reacquires it around the forced-link-down sleep.  Returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		/* Changing the 2.5G ability bit requires a link bounce. */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;	/* NOTE(review): magic BMCR bit; confirm against 5709 docs */
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1861
/* Advertisement masks used by the ethtool/link-setup paths.  Note that
 * ETHTOOL_ALL_FIBRE_SPEED expands "bp" from the caller's scope
 * (unhygienic macro) - only use where a struct bnx2 *bp is in scope.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
	ADVERTISED_1000baseT_Full)

/* MII advertisement register masks (copper). */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1876
0d8a6571
MC
/* Load the default link configuration for a firmware-managed (remote)
 * PHY from shared memory: either an autoneg advertisement mask or a
 * forced speed/duplex, depending on the stored settings.
 */
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	/* Each media type has its own saved default in shared memory. */
	if (bp->phy_port == PORT_TP)
		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
	else
		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		/* Forced mode: highest matching speed bit wins. */
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}
1923
deaf391b
MC
/* Establish default link settings: remote PHYs take their defaults
 * from firmware; otherwise autonegotiate everything, except that a
 * SerDes port may be forced to 1G full by the port hardware config.
 */
static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		bnx2_set_default_remote_link(bp);
		return;
	}

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		/* The hardware config may force a fixed 1G default. */
		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}
1949
df149d70
MC
/* Bump the driver pulse sequence number and write it to the firmware
 * mailbox in shared memory so the firmware knows the driver is alive.
 * indirect_lock serializes use of the PCICFG window register pair.
 */
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
1963
0d8a6571
MC
/* Handle a firmware link event for a remote (firmware-managed) PHY:
 * decode the link status word from shared memory into bp's link state,
 * answer any heartbeat request, log transitions, and reprogram the MAC.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each *HALF case overrides the full-duplex default and
		 * then falls through to the shared speed assignment.
		 */
		switch (speed) {
		case BNX2_LINK_STATUS_10HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_10FULL:
			bp->line_speed = SPEED_10;
			break;
		case BNX2_LINK_STATUS_100HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_100BASE_T4:
		case BNX2_LINK_STATUS_100FULL:
			bp->line_speed = SPEED_100;
			break;
		case BNX2_LINK_STATUS_1000HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_1000FULL:
			bp->line_speed = SPEED_1000;
			break;
		case BNX2_LINK_STATUS_2500HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_2500FULL:
			bp->line_speed = SPEED_2500;
			break;
		default:
			bp->line_speed = 0;
			break;
		}

		/* Resolve flow control: forced unless both speed and
		 * flow control are autonegotiated.
		 */
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* Media type may change on dual-media ports; reload
		 * defaults if it did.
		 */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
2040
2041static int
2042bnx2_set_remote_link(struct bnx2 *bp)
2043{
2044 u32 evt_code;
2045
2726d6e1 2046 evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
0d8a6571
MC
2047 switch (evt_code) {
2048 case BNX2_FW_EVT_CODE_LINK_EVENT:
2049 bnx2_remote_phy_event(bp);
2050 break;
2051 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2052 default:
df149d70 2053 bnx2_send_heart_beat(bp);
0d8a6571
MC
2054 break;
2055 }
2056 return 0;
2057}
2058
b6016b76
MC
/* Configure a copper PHY: reprogram the advertisement registers and
 * restart autoneg when autonegotiating; otherwise force speed/duplex
 * via BMCR, bouncing the link if it is up so the partner notices.
 * Called with phy_lock held; drops and reacquires it around the
 * forced-link-down sleep.  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Keep only the bits we manage in the current values so
		 * the comparison below detects real changes.
		 */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			    ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Only restart autoneg when the advertisement actually
		 * changed or autoneg was off.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR latches link-down; read twice for current state. */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
2157
2158static int
0d8a6571 2159bnx2_setup_phy(struct bnx2 *bp, u8 port)
52d07b1f
HH
2160__releases(&bp->phy_lock)
2161__acquires(&bp->phy_lock)
b6016b76
MC
2162{
2163 if (bp->loopback == MAC_LOOPBACK)
2164 return 0;
2165
583c28e5 2166 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
0d8a6571 2167 return (bnx2_setup_serdes_phy(bp, port));
b6016b76
MC
2168 }
2169 else {
2170 return (bnx2_setup_copper_phy(bp));
2171 }
2172}
2173
/* One-time init of the 5709 SerDes PHY: select the AN MMD through the
 * AER block, force fiber mode, set the 2.5G capability to match
 * bp->phy_flags, and enable BAM/CL73 next-page negotiation.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* On the 5709 SerDes the standard IEEE registers are offset
	 * by 0x10; record the remapped addresses for generic code.
	 */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Disable media auto-detect and force fiber mode. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise (or withdraw) 2.5G per the capability flag. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
		MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the IEEE combo block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2223
/* One-time init of the 5708 SerDes PHY: fiber mode, PLL early-lock
 * detect, optional 2.5G advertisement, plus TX amplitude tweaks for
 * early chip revisions and backplane configurations from NVRAM.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply a board-specific TX control value from shared memory,
	 * but only for backplane designs.
	 */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2281
/* One-time init of the 5706 SerDes PHY.  Adjusts (undocumented)
 * expansion registers 0x18/0x1c depending on whether jumbo frames
 * (MTU > 1500) are in use.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length bit. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2319
/* One-time init of the copper PHY: apply the CRC-fix and early-DAC
 * workarounds when flagged, set/clear the extended-packet-length bit
 * for jumbo MTU, and enable ethernet@wirespeed.  The 0x15/0x17/0x18
 * register numbers are vendor expansion/shadow registers.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		/* Vendor-specified register sequence for the CRC fix. */
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		/* Clear bit 8 of DSP expansion register 8. */
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear the extended packet length bits. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2371
2372
/* Top-level PHY init: record the default MII register map, read the
 * PHY ID, dispatch to the chip-specific init routine, then apply the
 * current link settings via bnx2_setup_phy().  When the PHY is
 * managed remotely (REMOTE_PHY_CAP) the local init is skipped.
 *
 * Called with bp->phy_lock held; callees may drop and re-acquire it.
 */
static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

	/* Default register map; bnx2_init_5709s_phy() overrides it. */
	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		goto setup_phy;

	/* Compose the 32-bit PHY ID from the two ID registers. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp, reset_phy);
	}
	else {
		rc = bnx2_init_copper_phy(bp, reset_phy);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
2418
/* Put the MAC into internal loopback (used by the self tests):
 * clear the port mode, set the loopback and force-link bits, and
 * mark the link as up.  Always returns 0.
 */
static int
bnx2_set_mac_loopback(struct bnx2 *bp)
{
	u32 mac_mode;

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~BNX2_EMAC_MODE_PORT;
	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
2431
static int bnx2_test_link(struct bnx2 *);

/* Put the PHY into loopback at 1000/full (used by the self tests),
 * wait up to ~1s for the link to come up, and force the MAC port
 * mode to GMII.  Returns a PHY write error, otherwise 0.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Poll for link: up to 10 x 100ms. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
2463
/* Send a message to the bootcode through the shared-memory mailbox
 * and optionally (ack != 0) wait for its acknowledgement.
 *
 * A sequence number is folded into msg_data so the firmware can match
 * the ack.  Returns 0 on success (or when no ack was requested, or
 * for WAIT0 messages which are fire-and-forget after the poll),
 * -EBUSY on ack timeout (after telling the firmware we timed out),
 * -EIO when the firmware reports a non-OK status.  @silent suppresses
 * the timeout log message.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			pr_err("fw sync timeout, reset code = %x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2508
/* Initialize the 5709 on-chip context memory: kick the hardware
 * MEM_INIT, then program the host page table with the DMA address of
 * each pre-allocated (and here zeroed) context block, polling for
 * each write to complete.  Returns 0, -EBUSY on a hardware poll
 * timeout, or -ENOMEM if a context block was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Wait for the hardware to finish its memory init (<= ~20us). */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Program one page-table entry (low dword, high dword,
		 * then the control/index write that commits it).
		 */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2556
/* Zero the on-chip context memory for all 96 connection IDs on
 * pre-5709 chips.  5706 A0 needs a remapped PCID for some VCIDs.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* 5706 A0 workaround: remap VCIDs with bit 3 set. */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
2599
2600static int
2601bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2602{
2603 u16 *good_mbuf;
2604 u32 good_mbuf_cnt;
2605 u32 val;
2606
2607 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2608 if (good_mbuf == NULL) {
3a9c6a49 2609 pr_err("Failed to allocate memory in %s\n", __func__);
b6016b76
MC
2610 return -ENOMEM;
2611 }
2612
2613 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2614 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2615
2616 good_mbuf_cnt = 0;
2617
2618 /* Allocate a bunch of mbufs and save the good ones in an array. */
2726d6e1 2619 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
b6016b76 2620 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2726d6e1
MC
2621 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2622 BNX2_RBUF_COMMAND_ALLOC_REQ);
b6016b76 2623
2726d6e1 2624 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
b6016b76
MC
2625
2626 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2627
2628 /* The addresses with Bit 9 set are bad memory blocks. */
2629 if (!(val & (1 << 9))) {
2630 good_mbuf[good_mbuf_cnt] = (u16) val;
2631 good_mbuf_cnt++;
2632 }
2633
2726d6e1 2634 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
b6016b76
MC
2635 }
2636
2637 /* Free the good ones back to the mbuf pool thus discarding
2638 * all the bad ones. */
2639 while (good_mbuf_cnt) {
2640 good_mbuf_cnt--;
2641
2642 val = good_mbuf[good_mbuf_cnt];
2643 val = (val << 9) | val | 1;
2644
2726d6e1 2645 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
b6016b76
MC
2646 }
2647 kfree(good_mbuf);
2648 return 0;
2649}
2650
/* Program a 6-byte MAC address into EMAC perfect-match slot @pos
 * (two registers per slot: bytes 0-1 in MATCH0, bytes 2-5 in MATCH1).
 */
static void
bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
{
	u32 val;

	val = (mac_addr[0] << 8) | mac_addr[1];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);

	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		(mac_addr[4] << 8) | mac_addr[5];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
}
2665
/* Allocate and DMA-map one page for slot @index of the RX page ring,
 * recording the mapping in both the software ring entry and the
 * hardware buffer descriptor.  @gfp lets callers use GFP_KERNEL at
 * init time and GFP_ATOMIC in the NAPI path.  Returns 0, -ENOMEM, or
 * -EIO on a DMA mapping failure.
 */
static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
{
	dma_addr_t mapping;
	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct rx_bd *rxbd =
		&rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
	struct page *page = alloc_page(gfp);

	if (!page)
		return -ENOMEM;
	mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	rx_pg->page = page;
	dma_unmap_addr_set(rx_pg, mapping, mapping);
	/* Give the 64-bit DMA address to the hardware descriptor. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return 0;
}
2690
/* Unmap and free the page (if any) held in slot @index of the RX
 * page ring, clearing the software ring entry.
 */
static void
bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct page *page = rx_pg->page;

	if (!page)
		return;

	pci_unmap_page(bp->pdev, dma_unmap_addr(rx_pg, mapping), PAGE_SIZE,
		       PCI_DMA_FROMDEVICE);

	__free_page(page);
	rx_pg->page = NULL;
}
2706
/* Allocate, align and DMA-map one RX skb for slot @index, fill in
 * the software entry and the hardware buffer descriptor, and advance
 * the producer byte-sequence counter.  @gfp as in bnx2_alloc_rx_page.
 * Returns 0, -ENOMEM, or -EIO on a DMA mapping failure.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = __netdev_alloc_skb(bp->dev, bp->rx_buf_size, gfp);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data to BNX2_RX_ALIGN. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	rx_buf->skb = skb;
	/* The hardware writes the l2_fhdr at the start of the buffer. */
	rx_buf->desc = (struct l2_fhdr *) skb->data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2742
/* Check whether attention @event changed state in the status block
 * (raw bits vs. acked bits differ).  If so, acknowledge it by setting
 * or clearing the bit via PCICFG and return 1; otherwise return 0.
 */
static int
bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 new_link_state, old_link_state;
	int is_set = 1;

	new_link_state = sblk->status_attn_bits & event;
	old_link_state = sblk->status_attn_bits_ack & event;
	if (new_link_state != old_link_state) {
		if (new_link_state)
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
		else
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
	} else
		is_set = 0;

	return is_set;
}
2762
/* Handle PHY-related attention events from the status block: a link
 * state change or a remote-PHY timer event.  Takes bp->phy_lock for
 * the duration.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);

}
2776
/* Read the hardware TX consumer index from the status block, skipping
 * the hole at the end of each descriptor page (the last index of a
 * page is not a usable BD, so step past it).
 */
static inline u16
bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_tx_cons_ptr;
	barrier();
	if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
		cons++;
	return cons;
}
2790
/* TX completion processing for one ring, limited to @budget packets.
 * Unmaps and frees completed skbs, advances the consumer indices, and
 * wakes the corresponding netdev queue if it was stopped and enough
 * descriptors are now free.  Returns the number of packets reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	struct netdev_queue *txq;

	/* Each NAPI instance maps 1:1 to a TX queue. */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			/* Only reclaim once every BD of the packet has
			 * completed.
			 */
			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		/* Unmap each fragment's BD. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				dma_unmap_addr(
					&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Re-read the hardware index once we catch up. */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		/* Re-check under the queue lock to avoid racing with
		 * the transmit path stopping the queue.
		 */
		__netif_tx_lock(txq, smp_processor_id());
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2881
/* Recycle @count consumed RX pages back to the producer side of the
 * page ring (moving page pointers and DMA addresses from cons slots
 * to prod slots) instead of freeing and reallocating them.  Used on
 * allocation failure and on error frames.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		/* Detach the last frag page from the skb and put it
		 * back into the ring before freeing the skb.
		 */
		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = shinfo->frags[shinfo->nr_frags].page;
		shinfo->frags[shinfo->nr_frags].page = NULL;

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		if (prod != cons) {
			/* Move the page and its DMA mapping from the
			 * consumer slot to the producer slot.
			 */
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			dma_unmap_addr_set(prod_rx_pg, mapping,
					   dma_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2937
/* Recycle an RX skb from consumer slot @cons back into producer slot
 * @prod without unmapping it, handing the buffer (and, when the slots
 * differ, its DMA mapping and descriptor address) back to the
 * hardware.  Only the header area is synced back to the device since
 * that is all the CPU may have touched.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		  struct sk_buff *skb, u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
		dma_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;
	prod_rx_buf->desc = (struct l2_fhdr *) skb->data;

	/* Same slot: nothing else to move. */
	if (cons == prod)
		return;

	dma_unmap_addr_set(prod_rx_buf, mapping,
			   dma_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2968
/* Build a received packet from an RX buffer and, for jumbo/split
 * frames, the RX page ring.  First replenishes the buffer slot; on
 * failure the buffer (and any pages) are recycled and an error is
 * returned so the packet is dropped.  @ring_idx packs the consumer
 * index in the high 16 bits and the producer index in the low 16.
 * Returns 0 on success or a negative errno.
 *
 * Note: the "+ 4" adjustments below account for a 4-byte trailer the
 * hardware appends after the payload (presumably the frame CRC —
 * matches the "len -= 4" in the caller).
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
	unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, BNX2_RX_OFFSET);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		/* Linear packet: the whole payload is in the skb. */
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		/* The remainder of the frame lives in ring pages. */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* The last fragment holds only (part of)
				 * the 4-byte trailer: trim it off and
				 * recycle the remaining pages.
				 */
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet. If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = dma_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 RX_PG_RING_IDX(pg_prod),
						 GFP_ATOMIC);
			if (unlikely(err)) {
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return err;
			}

			pci_unmap_page(bp->pdev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return 0;
}
3068
/* Read the hardware RX consumer index from the status block, skipping
 * the hole at the end of each descriptor page (mirrors
 * bnx2_get_hw_tx_cons()).
 */
static inline u16
bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_rx_cons_ptr;
	barrier();
	if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
		cons++;
	return cons;
}
3082
/* RX fast path: service up to @budget received packets on this NAPI
 * instance's RX ring and hand them to the stack.
 *
 * Small frames (<= rx_copy_thresh) are copied into a fresh skb and the
 * original buffer is recycled; larger frames are unmapped and passed up
 * directly (bnx2_rx_skb), possibly with page-ring fragments attached.
 * Returns the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf, *next_rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u16 vtag = 0;
		int hw_vlan __maybe_unused = 0;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;
		prefetchw(skb);

		/* Warm the cache for the next descriptor we will look at. */
		next_rx_buf =
			&rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
		prefetch(next_rx_buf->desc);

		rx_buf->skb = NULL;

		dma_addr = dma_unmap_addr(rx_buf, mapping);

		/* Only sync the header portion; the copy-break path below
		 * never touches more than BNX2_RX_COPY_THRESH bytes.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		rx_hdr = rx_buf->desc;
		len = rx_hdr->l2_fhdr_pkt_len;
		status = rx_hdr->l2_fhdr_status;

		/* hdr_len != 0 means the rest of the frame lives in the
		 * page ring (split header or jumbo frame).
		 */
		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Errored frame: recycle the buffer(s) and move on. */
		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
					  sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

		/* Strip the 4-byte FCS reported in l2_fhdr_pkt_len. */
		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 6);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb,
				BNX2_RX_OFFSET - 6,
				new_skb->data, len + 6);
			skb_reserve(new_skb, 6);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, rxr, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		/* VLAN tag present and not configured to keep it in-band:
		 * either hand it to the VLAN group (hw_vlan) or re-insert
		 * the tag into the frame data.
		 */
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
			vtag = rx_hdr->l2_fhdr_vlan_tag;
#ifdef BCM_VLAN
			if (bp->vlgrp)
				hw_vlan = 1;
			else
#endif
			{
				struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
					__skb_push(skb, 4);

				memmove(ve, skb->data + 4, ETH_ALEN * 2);
				ve->h_vlan_proto = htons(ETH_P_8021Q);
				ve->h_vlan_TCI = htons(vtag);
				len += 4;
			}
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless they are VLAN tagged. */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
		    (ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Trust the hardware checksum only when no checksum error
		 * bits are set for the TCP/UDP frame.
		 */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
		    (status & (L2_FHDR_STATUS_TCP_SEGMENT |
			       L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}
		if ((bp->dev->features & NETIF_F_RXHASH) &&
		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
		     L2_FHDR_STATUS_USE_RXHASH))
			skb->rxhash = rx_hdr->l2_fhdr_hash;

		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);

#ifdef BCM_VLAN
		if (hw_vlan)
			vlan_gro_receive(&bnapi->napi, bp->vlgrp, vtag, skb);
		else
#endif
			napi_gro_receive(&bnapi->napi, skb);

		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	/* Tell the chip how far we have consumed/replenished. */
	if (pg_ring_used)
		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
3267
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);
	/* Mask further interrupts until NAPI polling re-enables them. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3290
/* One-shot MSI ISR: like bnx2_msi() but without the explicit mask
 * write — the hardware masks the interrupt automatically in one-shot
 * mode, so only the NAPI schedule is needed.
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3307
/* INTx (possibly shared) interrupt handler.  Returns IRQ_NONE when the
 * interrupt did not come from this device so a sharing driver can run.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Record the status index only if we actually schedule NAPI, so
	 * a poll already in progress sees a consistent snapshot.
	 */
	if (napi_schedule_prep(&bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3346
f4e418f7 3347static inline int
43e80b89 3348bnx2_has_fast_work(struct bnx2_napi *bnapi)
f4e418f7 3349{
35e9010b 3350 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
bb4f98ab 3351 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
f4e418f7 3352
bb4f98ab 3353 if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
35e9010b 3354 (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
f4e418f7 3355 return 1;
43e80b89
MC
3356 return 0;
3357}
3358
/* Attention events the driver acts on: link changes and timer aborts. */
#define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
				 STATUS_ATTN_BITS_TIMER_ABORT)

/* Return non-zero when anything needs servicing: fast-path TX/RX work,
 * pending CNIC events (when built in), or unacknowledged attention bits.
 */
static inline int
bnx2_has_work(struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;

	if (bnx2_has_fast_work(bnapi))
		return 1;

#ifdef BCM_CNIC
	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
		return 1;
#endif

	/* An attention event is pending when its bit differs between
	 * the raw and the acknowledged attention words.
	 */
	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
		return 1;

	return 0;
}
3381
/* Work around lost MSI interrupts: if work has been pending across two
 * idle checks with no status-index progress, pulse the MSI enable bit
 * off/on and invoke the MSI handler directly to restart servicing.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		/* No progress since the last check: the MSI was missed. */
		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3403
#ifdef BCM_CNIC
/* Dispatch pending CNIC (iSCSI/RDMA offload) events to the registered
 * cnic handler, if any.  cnic_ops is RCU-protected because the cnic
 * module can unregister at any time.
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct cnic_ops *c_ops;

	if (!bnapi->cnic_present)
		return;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
						      bnapi->status_blk.msi);
	rcu_read_unlock();
}
#endif
3420
/* Service pending link/attention events: when the raw and acknowledged
 * attention words disagree on one of STATUS_ATTN_EVENTS, run the PHY
 * interrupt handler and kick the host coalescing block.
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}
}
3440
/* Run one round of fast-path servicing: reap completed TX descriptors
 * and process received packets.  Only RX consumes NAPI budget; returns
 * the updated work_done count.
 */
static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
			  int work_done, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
		bnx2_tx_int(bp, bnapi, 0);

	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);

	return work_done;
}
3455
/* NAPI poll routine for the non-default MSI-X vectors: fast-path work
 * only (no link/attention handling, which belongs to vector 0).
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			/* Done: re-enable this vector's interrupt and ack
			 * the index we have processed up to.
			 */
			napi_complete(napi);
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3482
/* Main NAPI poll routine (default vector): handles link events, TX/RX
 * fast-path work and CNIC events, then re-enables interrupts when all
 * work is done or returns with budget exhausted.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
		bnx2_poll_cnic(bp, bnapi);
#endif

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete(napi);
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: ack with the interrupt still masked first,
			 * then unmask with a second write.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3531
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Program the chip's RX filtering (promiscuous/allmulti/multicast hash
 * and unicast match filters) from the net_device flags and address
 * lists.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct netdev_hw_addr *ha;
	int i;

	if (!netif_running(dev))
		return;

	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags in-band only when no VLAN group is registered. */
	if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: fill every hash register. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each address into one bit of the 256-bit filter. */
		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Too many secondary unicast addresses for the match filters:
	 * fall back to promiscuous mode.
	 */
	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the match filter list */
		i = 0;
		netdev_for_each_uc_addr(ha, dev) {
			bnx2_set_mac_addr(bp, ha->addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			i++;
		}

	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable, reprogram, then re-enable the sort block. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3624
/* Validate one section descriptor of a firmware blob: the section must
 * lie entirely within the blob, be 32-bit aligned at its offset, have a
 * length that is a multiple of @alignment (a power of two), and be
 * non-empty when @non_empty is set.  Returns 0 or -EINVAL.
 */
static int __devinit
check_fw_section(const struct firmware *fw,
		 const struct bnx2_fw_file_section *section,
		 u32 alignment, bool non_empty)
{
	u32 offset = be32_to_cpu(section->offset);
	u32 len = be32_to_cpu(section->len);

	if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
		return -EINVAL;
	if ((non_empty && len == 0) || len > fw->size - offset ||
	    len & (alignment - 1))
		return -EINVAL;
	return 0;
}
3640
3641static int __devinit
3642check_mips_fw_entry(const struct firmware *fw,
3643 const struct bnx2_mips_fw_file_entry *entry)
3644{
3645 if (check_fw_section(fw, &entry->text, 4, true) ||
3646 check_fw_section(fw, &entry->data, 4, false) ||
3647 check_fw_section(fw, &entry->rodata, 4, false))
3648 return -EINVAL;
3649 return 0;
3650}
3651
/* Request and sanity-check the MIPS and RV2P firmware images for this
 * chip revision.  The firmware handles are stored in bp and are
 * presumably released by the driver's teardown path — NOTE(review):
 * confirm against the probe/remove code, which is outside this view.
 * Returns 0 on success or a negative errno.
 */
static int __devinit
bnx2_request_firmware(struct bnx2 *bp)
{
	const char *mips_fw_file, *rv2p_fw_file;
	const struct bnx2_mips_fw_file *mips_fw;
	const struct bnx2_rv2p_fw_file *rv2p_fw;
	int rc;

	/* Pick the firmware files matching the chip and revision. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		mips_fw_file = FW_MIPS_FILE_09;
		if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5709_A1))
			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
		else
			rv2p_fw_file = FW_RV2P_FILE_09;
	} else {
		mips_fw_file = FW_MIPS_FILE_06;
		rv2p_fw_file = FW_RV2P_FILE_06;
	}

	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
	if (rc) {
		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
		return rc;
	}

	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
	if (rc) {
		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
		return rc;
	}
	/* Validate every section before anything is written to the chip. */
	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
		return -EINVAL;
	}
	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
		return -EINVAL;
	}

	return 0;
}
3703
3704static u32
3705rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3706{
3707 switch (idx) {
3708 case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3709 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3710 rv2p_code |= RV2P_BD_PAGE_SIZE;
3711 break;
3712 }
3713 return rv2p_code;
3714}
3715
/* Download one RV2P processor image (validated earlier by
 * check_fw_section, 8-byte aligned) into the chip, apply the fixup
 * table, and leave the processor in reset.  Always returns 0.
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	/* Each 64-bit instruction is written as a high/low word pair,
	 * then committed to instruction slot i/8 via the address command.
	 */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		val = (i / 8) | cmd;
		REG_WR(bp, addr, val);
	}

	/* Re-write the instructions named by the fixup table, patched
	 * through rv2p_fw_fixup().  A zero location means "no fixup".
	 */
	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			val = (loc / 2) | cmd;
			REG_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}
3775
/* Load one on-chip MIPS CPU's firmware: halt the CPU, copy the text,
 * data and read-only sections into its scratchpad via indirect register
 * writes, set the program counter, then restart it.  Sections were
 * validated earlier by check_mips_fw_entry().  Always returns 0.
 */
static int
load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
	    const struct bnx2_mips_fw_file_entry *fw_entry)
{
	u32 addr, len, file_offset;
	__be32 *data;
	u32 offset;
	u32 val;

	/* Halt the CPU. */
	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);

	/* Load the Text area. */
	addr = be32_to_cpu(fw_entry->text.addr);
	len = be32_to_cpu(fw_entry->text.len);
	file_offset = be32_to_cpu(fw_entry->text.offset);
	data = (__be32 *)(bp->mips_firmware->data + file_offset);

	/* Translate the CPU's view of the load address to a scratchpad
	 * offset reachable through the indirect register window.
	 */
	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
	if (len) {
		int j;

		for (j = 0; j < (len / 4); j++, offset += 4)
			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
	}

	/* Load the Data area. */
	addr = be32_to_cpu(fw_entry->data.addr);
	len = be32_to_cpu(fw_entry->data.len);
	file_offset = be32_to_cpu(fw_entry->data.offset);
	data = (__be32 *)(bp->mips_firmware->data + file_offset);

	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
	if (len) {
		int j;

		for (j = 0; j < (len / 4); j++, offset += 4)
			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
	}

	/* Load the Read-Only area. */
	addr = be32_to_cpu(fw_entry->rodata.addr);
	len = be32_to_cpu(fw_entry->rodata.len);
	file_offset = be32_to_cpu(fw_entry->rodata.offset);
	data = (__be32 *)(bp->mips_firmware->data + file_offset);

	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
	if (len) {
		int j;

		for (j = 0; j < (len / 4); j++, offset += 4)
			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
	}

	/* Clear the pre-fetch instruction. */
	bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);

	val = be32_to_cpu(fw_entry->start_addr);
	bnx2_reg_wr_ind(bp, cpu_reg->pc, val);

	/* Start the CPU. */
	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);

	return 0;
}
3847
/* Load firmware into all on-chip processors: both RV2P engines and the
 * five MIPS CPUs (RXP, TXP, TPAT, COM, CP).  Returns 0 or the first
 * load_cpu_fw() error.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	const struct bnx2_mips_fw_file *mips_fw =
		(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
	const struct bnx2_rv2p_fw_file *rv2p_fw =
		(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
	int rc;

	/* Initialize the RV2P processor. */
	load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
	load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);

	/* Initialize the RX Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);

init_cpu_err:
	return rc;
}
3887
/* Transition the device between PCI power states.
 *
 * PCI_D0: wake the chip (clearing PME status) and disable the
 * magic/ACPI packet modes used while asleep.  PCI_D3hot: optionally
 * configure Wake-on-LAN (force 10/100 autoneg on copper, enable magic
 * packet reception and all-multicast), notify firmware, and program
 * PMCSR.  Other states return -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			autoneg = bp->autoneg;
			advertising = bp->advertising;

			/* On copper, renegotiate down to 10/100 for the
			 * low-power link; restore the settings afterwards.
			 */
			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
				     1, 0);

		/* Program PMCSR: 5706 A0/A1 only enter D3hot (state 3)
		 * when WOL is armed; later chips always do.
		 */
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
4025
4026static int
4027bnx2_acquire_nvram_lock(struct bnx2 *bp)
4028{
4029 u32 val;
4030 int j;
4031
4032 /* Request access to the flash interface. */
4033 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4034 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4035 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4036 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4037 break;
4038
4039 udelay(5);
4040 }
4041
4042 if (j >= NVRAM_TIMEOUT_COUNT)
4043 return -EBUSY;
4044
4045 return 0;
4046}
4047
4048static int
4049bnx2_release_nvram_lock(struct bnx2 *bp)
4050{
4051 int j;
4052 u32 val;
4053
4054 /* Relinquish nvram interface. */
4055 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4056
4057 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4058 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4059 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4060 break;
4061
4062 udelay(5);
4063 }
4064
4065 if (j >= NVRAM_TIMEOUT_COUNT)
4066 return -EBUSY;
4067
4068 return 0;
4069}
4070
4071
4072static int
4073bnx2_enable_nvram_write(struct bnx2 *bp)
4074{
4075 u32 val;
4076
4077 val = REG_RD(bp, BNX2_MISC_CFG);
4078 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4079
e30372c9 4080 if (bp->flash_info->flags & BNX2_NV_WREN) {
b6016b76
MC
4081 int j;
4082
4083 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4084 REG_WR(bp, BNX2_NVM_COMMAND,
4085 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4086
4087 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4088 udelay(5);
4089
4090 val = REG_RD(bp, BNX2_NVM_COMMAND);
4091 if (val & BNX2_NVM_COMMAND_DONE)
4092 break;
4093 }
4094
4095 if (j >= NVRAM_TIMEOUT_COUNT)
4096 return -EBUSY;
4097 }
4098 return 0;
4099}
4100
4101static void
4102bnx2_disable_nvram_write(struct bnx2 *bp)
4103{
4104 u32 val;
4105
4106 val = REG_RD(bp, BNX2_MISC_CFG);
4107 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4108}
4109
4110
4111static void
4112bnx2_enable_nvram_access(struct bnx2 *bp)
4113{
4114 u32 val;
4115
4116 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4117 /* Enable both bits, even on read. */
6aa20a22 4118 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
4119 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4120}
4121
4122static void
4123bnx2_disable_nvram_access(struct bnx2 *bp)
4124{
4125 u32 val;
4126
4127 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4128 /* Disable both bits, even after read. */
6aa20a22 4129 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
4130 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4131 BNX2_NVM_ACCESS_ENABLE_WR_EN));
4132}
4133
/* Erase one flash page at @offset.  Buffered flash parts need no erase
 * and return immediately.  Returns 0 on completion or -EBUSY on
 * timeout.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4173
/* Read one 32-bit word of NVRAM at @offset into @ret_val (4 bytes,
 * stored in flash/big-endian byte order).  @cmd_flags carries the
 * FIRST/LAST framing bits used to bracket multi-dword transfers.
 * Caller must hold the NVRAM lock with access enabled.
 *
 * Returns 0 on success, -EBUSY if DONE is not seen within the
 * polling window.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion, then latch the data register. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			/* Keep flash byte order regardless of host
			 * endianness. */
			__be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
			memcpy(ret_val, &v, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4217
4218
/* Write one 32-bit word (@val, 4 bytes in flash byte order) to NVRAM
 * at @offset.  @cmd_flags carries FIRST/LAST framing bits.  Caller
 * must hold the NVRAM lock with write access enabled.
 *
 * Returns 0 on success, -EBUSY on completion timeout.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd;
	__be32 val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	memcpy(&val32, val, 4);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4262
/* Identify the attached flash/EEPROM part and record it in
 * bp->flash_info, then determine its size into bp->flash_size.
 *
 * On 5709 the part is fixed (flash_5709).  Otherwise the strapping
 * bits read from NVM_CFG1 are matched against flash_table[]; if the
 * interface has not yet been reconfigured, the matching entry's
 * config registers are programmed under the NVRAM lock.
 *
 * Returns 0 on success, -ENODEV if no table entry matches, or an
 * error from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	/* Bit 30 set => the flash interface was already reconfigured
	 * (e.g. by the boot code); match on the backup strap bits only. */
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field is valid. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		bp->flash_info = NULL;
		pr_alert("Unknown flash/EEPROM type\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size advertised in shared memory; fall back to the
	 * table default when the firmware does not provide one. */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4345
4346static int
4347bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4348 int buf_size)
4349{
4350 int rc = 0;
4351 u32 cmd_flags, offset32, len32, extra;
4352
4353 if (buf_size == 0)
4354 return 0;
4355
4356 /* Request access to the flash interface. */
4357 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4358 return rc;
4359
4360 /* Enable access to flash interface */
4361 bnx2_enable_nvram_access(bp);
4362
4363 len32 = buf_size;
4364 offset32 = offset;
4365 extra = 0;
4366
4367 cmd_flags = 0;
4368
4369 if (offset32 & 3) {
4370 u8 buf[4];
4371 u32 pre_len;
4372
4373 offset32 &= ~3;
4374 pre_len = 4 - (offset & 3);
4375
4376 if (pre_len >= len32) {
4377 pre_len = len32;
4378 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4379 BNX2_NVM_COMMAND_LAST;
4380 }
4381 else {
4382 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4383 }
4384
4385 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4386
4387 if (rc)
4388 return rc;
4389
4390 memcpy(ret_buf, buf + (offset & 3), pre_len);
4391
4392 offset32 += 4;
4393 ret_buf += pre_len;
4394 len32 -= pre_len;
4395 }
4396 if (len32 & 3) {
4397 extra = 4 - (len32 & 3);
4398 len32 = (len32 + 4) & ~3;
4399 }
4400
4401 if (len32 == 4) {
4402 u8 buf[4];
4403
4404 if (cmd_flags)
4405 cmd_flags = BNX2_NVM_COMMAND_LAST;
4406 else
4407 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4408 BNX2_NVM_COMMAND_LAST;
4409
4410 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4411
4412 memcpy(ret_buf, buf, 4 - extra);
4413 }
4414 else if (len32 > 0) {
4415 u8 buf[4];
4416
4417 /* Read the first word. */
4418 if (cmd_flags)
4419 cmd_flags = 0;
4420 else
4421 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4422
4423 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4424
4425 /* Advance to the next dword. */
4426 offset32 += 4;
4427 ret_buf += 4;
4428 len32 -= 4;
4429
4430 while (len32 > 4 && rc == 0) {
4431 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4432
4433 /* Advance to the next dword. */
4434 offset32 += 4;
4435 ret_buf += 4;
4436 len32 -= 4;
4437 }
4438
4439 if (rc)
4440 return rc;
4441
4442 cmd_flags = BNX2_NVM_COMMAND_LAST;
4443 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4444
4445 memcpy(ret_buf, buf, 4 - extra);
4446 }
4447
4448 /* Disable access to flash interface */
4449 bnx2_disable_nvram_access(bp);
4450
4451 bnx2_release_nvram_lock(bp);
4452
4453 return rc;
4454}
4455
/* Write @buf_size bytes from @data_buf to NVRAM at byte @offset.
 *
 * Unaligned head/tail bytes are handled read-modify-write: the
 * surrounding dwords are read first and merged into a kmalloc'd
 * aligned shadow buffer.  The transfer then proceeds one flash page
 * at a time; for non-buffered parts each page is read into
 * @flash_buffer, erased, and rewritten with the preserved bytes
 * around the new data.  The NVRAM lock is acquired and released per
 * page so other agents are not starved for the whole transfer.
 *
 * Returns 0 on success or a negative errno (-ENOMEM, -EBUSY, or an
 * error from the lock/read/write helpers).
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	if ((align_start = (offset32 & 3))) {
		/* Unaligned start: widen to the dword boundary and
		 * fetch the bytes we must preserve. */
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	if (len32 & 3) {
		/* Unaligned end: widen and fetch the trailing dword. */
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	if (align_start || align_end) {
		/* Merge preserved head/tail bytes with the caller's data
		 * into one aligned buffer. */
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		/* 264 bytes: page buffer for the largest supported
		 * non-buffered flash page. */
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write.
			 * NOTE(review): the return value is ignored here,
			 * unlike the first enable above — confirm intended. */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
			    ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
			     (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	/* kfree(NULL) is a no-op, so both paths are safe. */
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4635
/* Negotiate optional capabilities with the bootcode firmware via the
 * shared-memory capability mailbox (BNX2_FW_CAP_MB).
 *
 * Clears then re-derives BNX2_FLAG_CAN_KEEP_VLAN and
 * BNX2_PHY_FLAG_REMOTE_PHY_CAP.  For remote-PHY capable SerDes
 * devices it also latches the current port type (fibre vs TP) from
 * the firmware link status.  When the interface is up, acknowledged
 * capabilities are written back through BNX2_DRV_ACK_CAP_MB.
 */
static void
bnx2_init_fw_cap(struct bnx2 *bp)
{
	u32 val, sig = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;

	/* VLAN stripping can be kept off only when ASF management
	 * firmware is not running. */
	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;

	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
		return;

	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
		u32 link;

		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;

		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
		if (link & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
	}

	if (netif_running(bp->dev) && sig)
		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
}
4675
/* Point the PCI GRC windows at the MSI-X table and PBA so the host
 * can reach them: separate-window mode first, then window 2 -> MSI-X
 * table, window 3 -> pending-bit array.
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4684
/* Perform a coordinated soft reset of the chip.
 *
 * Sequence: quiesce DMA/coalescing, handshake WAIT0 with firmware,
 * deposit the driver reset signature in shared memory, then issue the
 * reset (5709 uses MISC_COMMAND; older chips use PCICFG_MISC_CONFIG
 * with rev-specific delays and a polled completion).  Afterwards the
 * endian-detect register is sanity-checked, the firmware WAIT1
 * handshake completes, firmware capabilities are re-read, and 5706 A0
 * and MSI-X workarounds are applied.
 *
 * @reset_code: BNX2_DRV_MSG_CODE_* value passed to the firmware.
 * Returns 0 on success, -EBUSY/-ENODEV on reset failure, or an error
 * from bnx2_fw_sync()/bnx2_alloc_bad_rbuf().
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			pr_err("Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		pr_err("Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	/* If the firmware flipped the remote-PHY port type, re-derive
	 * the default link settings for the new port. */
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		bnx2_setup_msix_tbl(bp);
		/* Prevent MSIX table reads and write from timing out */
		REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
		       BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
	}

	return rc;
}
4794
/* Bring the chip from post-reset state to fully operational.
 *
 * Order matters throughout: DMA/byte-swap config, PCI-X quirks,
 * context memory, on-chip CPU firmware, NVRAM identification, MAC
 * address and MTU programming, status/statistics block DMA addresses,
 * host-coalescing parameters (per-vector for MSI-X), RX filter setup,
 * and finally the WAIT2/RESET firmware handshake before enabling the
 * remaining engine blocks.
 *
 * Returns 0 on success or an error from context init, CPU firmware
 * load, or the firmware handshake.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val, mtu;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	/* Extra DMA tuning bit for 133 MHz PCI-X. */
	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		/* Disable PCI-X relaxed ordering. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip RISC processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
		if (CHIP_REV(bp) == CHIP_REV_Ax)
			val |= BNX2_MQ_CONFIG_HALT_DIS;
	}

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the TX backoff engine from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	mtu = bp->dev->mtu;
	val = mtu + ETH_HLEN + ETH_FCS_LEN;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	/* RBUF thresholds are sized for at least a standard frame. */
	if (mtu < 1500)
		mtu = 1500;

	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));

	/* Clear the status block and per-vector bookkeeping. */
	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->idle_chk_status_idx = 0xffff;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host coalescing trip points and tick timers (int value in the
	 * high 16 bits, non-int value in the low 16 bits). */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);	/* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
		       BNX2_HC_MSIX_BIT_VECTOR_VAL);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Program the per-vector status-block configs (vector 0 was
	 * covered by the registers above). */
	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		REG_WR(bp, base,
		       BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
		       BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
		       BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
		       (bp->tx_quick_cons_trip_int << 16) |
		       bp->tx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
		       (bp->tx_ticks_int << 16) | bp->tx_ticks);

		REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
		       (bp->rx_quick_cons_trip_int << 16) |
		       bp->rx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
		       (bp->rx_ticks_int << 16) | bp->rx_ticks);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  1, 0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
5019
c76c0475
MC
5020static void
5021bnx2_clear_ring_states(struct bnx2 *bp)
5022{
5023 struct bnx2_napi *bnapi;
35e9010b 5024 struct bnx2_tx_ring_info *txr;
bb4f98ab 5025 struct bnx2_rx_ring_info *rxr;
c76c0475
MC
5026 int i;
5027
5028 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5029 bnapi = &bp->bnx2_napi[i];
35e9010b 5030 txr = &bnapi->tx_ring;
bb4f98ab 5031 rxr = &bnapi->rx_ring;
c76c0475 5032
35e9010b
MC
5033 txr->tx_cons = 0;
5034 txr->hw_tx_cons = 0;
bb4f98ab
MC
5035 rxr->rx_prod_bseq = 0;
5036 rxr->rx_prod = 0;
5037 rxr->rx_cons = 0;
5038 rxr->rx_pg_prod = 0;
5039 rxr->rx_pg_cons = 0;
c76c0475
MC
5040 }
5041}
5042
/* Program the L2 TX context for connection @cid: context type, command
 * type, and the DMA address of the TX descriptor ring.  The 5709 (XI)
 * uses a different set of context offsets than older chips.
 */
static void
bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
{
	u32 val, offset0, offset1, offset2, offset3;
	u32 cid_addr = GET_CID_ADDR(cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	bnx2_ctx_wr(bp, cid_addr, offset0, val);

	/* 8 in bits 16-23: bd_pre_read value for the TX engine. */
	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	bnx2_ctx_wr(bp, cid_addr, offset1, val);

	/* TX BD chain base address, split into high/low halves. */
	val = (u64) txr->tx_desc_mapping >> 32;
	bnx2_ctx_wr(bp, cid_addr, offset2, val);

	val = (u64) txr->tx_desc_mapping & 0xffffffff;
	bnx2_ctx_wr(bp, cid_addr, offset3, val);
}
/* Initialize TX ring @ring_num: chain the last BD back to the start of
 * the ring, reset the software producer state, record the mailbox
 * doorbell addresses, and program the hardware TX context.
 * Ring 0 uses TX_CID; additional TSS rings use TX_TSS_CID onward.
 */
static void
bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
{
	struct tx_bd *txbd;
	u32 cid = TX_CID;
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;

	bnapi = &bp->bnx2_napi[ring_num];
	txr = &bnapi->tx_ring;

	if (ring_num == 0)
		cid = TX_CID;
	else
		cid = TX_TSS_CID + ring_num - 1;

	/* Wake the queue once half the ring has drained. */
	bp->tx_wake_thresh = bp->tx_ring_size / 2;

	/* The final descriptor is the chain pointer back to the ring base. */
	txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];

	txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;

	txr->tx_prod = 0;
	txr->tx_prod_bseq = 0;

	/* Mailbox addresses used to ring the TX doorbell. */
	txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
	txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;

	bnx2_init_tx_context(bp, cid, txr);
}
5104
5105static void
5d5d0015
MC
5106bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5107 int num_rings)
b6016b76 5108{
b6016b76 5109 int i;
5d5d0015 5110 struct rx_bd *rxbd;
6aa20a22 5111
5d5d0015 5112 for (i = 0; i < num_rings; i++) {
13daffa2 5113 int j;
b6016b76 5114
5d5d0015 5115 rxbd = &rx_ring[i][0];
13daffa2 5116 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5d5d0015 5117 rxbd->rx_bd_len = buf_size;
13daffa2
MC
5118 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5119 }
5d5d0015 5120 if (i == (num_rings - 1))
13daffa2
MC
5121 j = 0;
5122 else
5123 j = i + 1;
5d5d0015
MC
5124 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5125 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
13daffa2 5126 }
5d5d0015
MC
5127}
5128
/* Initialize RX ring @ring_num: build the BD chains, program the RX
 * hardware context (including the optional jumbo page ring), populate
 * the rings with receive buffers, and publish the initial producer
 * indices to the hardware mailboxes.  Ring 0 uses RX_CID; RSS rings
 * use RX_RSS_CID onward.  Buffer allocation failures are logged and
 * the ring is left partially filled rather than failing outright.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Zero first; overwritten below when the page ring is in use. */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		/* Jumbo/page ring for frames larger than one skb buffer. */
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
			    BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* First page of the normal RX BD chain. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Fill the page ring; a shortfall is logged, not fatal. */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
				    ring_num, i, bp->rx_pg_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Fill the skb ring the same way. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_num, i, bp->rx_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	/* Mailbox addresses for publishing producer updates. */
	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Tell the hardware about the buffers just posted. */
	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
5214
/* Initialize every TX and RX ring and, when more than one RX ring is
 * in use, program the RSS indirection table and hash configuration.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	/* Disable TSS while the TX rings are (re)built. */
	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	/* Disable RSS while the RX rings are (re)built. */
	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32;
		u8 *tbl = (u8 *) &tbl_32;

		bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
				BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);

		/* Build the indirection table one byte at a time and
		 * flush each completed 32-bit word to the RXP scratch
		 * area.  Ring 0 is reserved for non-RSS traffic, so
		 * entries spread over the remaining num_rx_rings - 1.
		 */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			tbl[i % 4] = i % (bp->num_rx_rings - 1);
			if ((i % 4) == 3)
				bnx2_reg_wr_ind(bp,
						BNX2_RXP_SCRATCH_RSS_TBL + i,
						cpu_to_be32(tbl_32));
		}

		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5259
5d5d0015 5260static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
13daffa2 5261{
5d5d0015 5262 u32 max, num_rings = 1;
13daffa2 5263
5d5d0015
MC
5264 while (ring_size > MAX_RX_DESC_CNT) {
5265 ring_size -= MAX_RX_DESC_CNT;
13daffa2
MC
5266 num_rings++;
5267 }
5268 /* round to next power of 2 */
5d5d0015 5269 max = max_size;
13daffa2
MC
5270 while ((max & num_rings) == 0)
5271 max >>= 1;
5272
5273 if (num_rings != max)
5274 max <<= 1;
5275
5d5d0015
MC
5276 return max;
5277}
5278
/* Compute the RX buffer geometry for the given ring size and current
 * MTU: buffer sizes, copybreak threshold, and whether a separate page
 * (jumbo) ring is needed when a full frame no longer fits in one page.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	/* Total skb footprint including alignment padding and the
	 * shared-info trailer, used to decide if paged RX is needed.
	 */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		sizeof(struct skb_shared_info);

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		/* Pages per frame; the "- 40" presumably accounts for
		 * headers kept in the header buffer — TODO confirm.
		 */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
		/* With paged RX, the header buffer only needs to hold
		 * the copybreak region; disable copybreak itself.
		 */
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
5317
/* Unmap and free every skb still queued on the TX rings.  Called with
 * the device quiesced (reset path), so no locking against the TX
 * fast path is visible here.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
		int j;

		if (txr->tx_buf_ring == NULL)
			continue;

		/* j advances by 1 + nr_frags per packet, so the loop
		 * increment lives inside the body.
		 */
		for (j = 0; j < TX_DESC_CNT; ) {
			struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			if (skb == NULL) {
				j++;
				continue;
			}

			/* First BD maps the linear part of the skb. */
			pci_unmap_single(bp->pdev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);

			tx_buf->skb = NULL;

			/* Following BDs map the page fragments. */
			last = tx_buf->nr_frags;
			j++;
			for (k = 0; k < last; k++, j++) {
				tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
				pci_unmap_page(bp->pdev,
					dma_unmap_addr(tx_buf, mapping),
					skb_shinfo(skb)->frags[k].size,
					PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
		}
	}
}
5361
/* Unmap and free every skb and page still posted on the RX rings. */
static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		/* NOTE(review): the TX counterpart uses continue here;
		 * return is presumably safe because rings are allocated
		 * in order, so a NULL ring implies all later rings are
		 * NULL too — confirm against bnx2_alloc_mem().
		 */
		if (rxr->rx_buf_ring == NULL)
			return;

		for (j = 0; j < bp->rx_max_ring_idx; j++) {
			struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;

			dev_kfree_skb(skb);
		}
		/* Release the page (jumbo) ring buffers as well. */
		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
			bnx2_free_rx_page(bp, rxr, j);
	}
}
5395
/* Release all buffers on both the TX and RX rings. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5402
5403static int
5404bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5405{
5406 int rc;
5407
5408 rc = bnx2_reset_chip(bp, reset_code);
5409 bnx2_free_skbs(bp);
5410 if (rc)
5411 return rc;
5412
fba9fe91
MC
5413 if ((rc = bnx2_init_chip(bp)) != 0)
5414 return rc;
5415
35e9010b 5416 bnx2_init_all_rings(bp);
b6016b76
MC
5417 return 0;
5418}
5419
5420static int
9a120bc5 5421bnx2_init_nic(struct bnx2 *bp, int reset_phy)
b6016b76
MC
5422{
5423 int rc;
5424
5425 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5426 return rc;
5427
80be4434 5428 spin_lock_bh(&bp->phy_lock);
9a120bc5 5429 bnx2_init_phy(bp, reset_phy);
b6016b76 5430 bnx2_set_link(bp);
543a827d
MC
5431 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5432 bnx2_remote_phy_event(bp);
0d8a6571 5433 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
5434 return 0;
5435}
5436
74bf4ba3
MC
5437static int
5438bnx2_shutdown_chip(struct bnx2 *bp)
5439{
5440 u32 reset_code;
5441
5442 if (bp->flags & BNX2_FLAG_NO_WOL)
5443 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5444 else if (bp->wol)
5445 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5446 else
5447 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5448
5449 return bnx2_reset_chip(bp, reset_code);
5450}
5451
/* Ethtool self-test: walk a table of registers and verify that the
 * read/write bits (rw_mask) can be cleared and set, and that the
 * read-only bits (ro_mask) are unaffected by writes.  Each register
 * is restored to its saved value afterwards.  Returns 0 on success
 * or -ENODEV on the first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	static const struct {
		u16 offset;
		u16 flags;
/* Entry applies only to pre-5709 chips. */
#define BNX2_FL_NOT_5709	1
		u32 rw_mask;
		u32 ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		/* Sentinel terminating the table. */
		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Writing all zeroes must clear every r/w bit. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Writing all ones must set every r/w bit. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the original value even on failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5622
5623static int
5624bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5625{
f71e1309 5626 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
b6016b76
MC
5627 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5628 int i;
5629
5630 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5631 u32 offset;
5632
5633 for (offset = 0; offset < size; offset += 4) {
5634
2726d6e1 5635 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
b6016b76 5636
2726d6e1 5637 if (bnx2_reg_rd_ind(bp, start + offset) !=
b6016b76
MC
5638 test_pattern[i]) {
5639 return -ENODEV;
5640 }
5641 }
5642 }
5643 return 0;
5644}
5645
5646static int
5647bnx2_test_memory(struct bnx2 *bp)
5648{
5649 int ret = 0;
5650 int i;
5bae30c9 5651 static struct mem_entry {
b6016b76
MC
5652 u32 offset;
5653 u32 len;
5bae30c9 5654 } mem_tbl_5706[] = {
b6016b76 5655 { 0x60000, 0x4000 },
5b0c76ad 5656 { 0xa0000, 0x3000 },
b6016b76
MC
5657 { 0xe0000, 0x4000 },
5658 { 0x120000, 0x4000 },
5659 { 0x1a0000, 0x4000 },
5660 { 0x160000, 0x4000 },
5661 { 0xffffffff, 0 },
5bae30c9
MC
5662 },
5663 mem_tbl_5709[] = {
5664 { 0x60000, 0x4000 },
5665 { 0xa0000, 0x3000 },
5666 { 0xe0000, 0x4000 },
5667 { 0x120000, 0x4000 },
5668 { 0x1a0000, 0x4000 },
5669 { 0xffffffff, 0 },
b6016b76 5670 };
5bae30c9
MC
5671 struct mem_entry *mem_tbl;
5672
5673 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5674 mem_tbl = mem_tbl_5709;
5675 else
5676 mem_tbl = mem_tbl_5706;
b6016b76
MC
5677
5678 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5679 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5680 mem_tbl[i].len)) != 0) {
5681 return ret;
5682 }
5683 }
6aa20a22 5684
b6016b76
MC
5685 return ret;
5686}
5687
bc5a0690
MC
5688#define BNX2_MAC_LOOPBACK 0
5689#define BNX2_PHY_LOOPBACK 1
5690
b6016b76 5691static int
bc5a0690 5692bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
b6016b76
MC
5693{
5694 unsigned int pkt_size, num_pkts, i;
5695 struct sk_buff *skb, *rx_skb;
5696 unsigned char *packet;
bc5a0690 5697 u16 rx_start_idx, rx_idx;
b6016b76
MC
5698 dma_addr_t map;
5699 struct tx_bd *txbd;
5700 struct sw_bd *rx_buf;
5701 struct l2_fhdr *rx_hdr;
5702 int ret = -ENODEV;
c76c0475 5703 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
35e9010b 5704 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
bb4f98ab 5705 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
c76c0475
MC
5706
5707 tx_napi = bnapi;
b6016b76 5708
35e9010b 5709 txr = &tx_napi->tx_ring;
bb4f98ab 5710 rxr = &bnapi->rx_ring;
bc5a0690
MC
5711 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5712 bp->loopback = MAC_LOOPBACK;
5713 bnx2_set_mac_loopback(bp);
5714 }
5715 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
583c28e5 5716 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
489310a4
MC
5717 return 0;
5718
80be4434 5719 bp->loopback = PHY_LOOPBACK;
bc5a0690
MC
5720 bnx2_set_phy_loopback(bp);
5721 }
5722 else
5723 return -EINVAL;
b6016b76 5724
84eaa187 5725 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
932f3772 5726 skb = netdev_alloc_skb(bp->dev, pkt_size);
b6cbc3b6
JL
5727 if (!skb)
5728 return -ENOMEM;
b6016b76 5729 packet = skb_put(skb, pkt_size);
6634292b 5730 memcpy(packet, bp->dev->dev_addr, 6);
b6016b76
MC
5731 memset(packet + 6, 0x0, 8);
5732 for (i = 14; i < pkt_size; i++)
5733 packet[i] = (unsigned char) (i & 0xff);
5734
e95524a7
AD
5735 map = pci_map_single(bp->pdev, skb->data, pkt_size,
5736 PCI_DMA_TODEVICE);
5737 if (pci_dma_mapping_error(bp->pdev, map)) {
3d16af86
BL
5738 dev_kfree_skb(skb);
5739 return -EIO;
5740 }
b6016b76 5741
bf5295bb
MC
5742 REG_WR(bp, BNX2_HC_COMMAND,
5743 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5744
b6016b76
MC
5745 REG_RD(bp, BNX2_HC_COMMAND);
5746
5747 udelay(5);
35efa7c1 5748 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
b6016b76 5749
b6016b76
MC
5750 num_pkts = 0;
5751
35e9010b 5752 txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
b6016b76
MC
5753
5754 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5755 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5756 txbd->tx_bd_mss_nbytes = pkt_size;
5757 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5758
5759 num_pkts++;
35e9010b
MC
5760 txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5761 txr->tx_prod_bseq += pkt_size;
b6016b76 5762
35e9010b
MC
5763 REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5764 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
b6016b76
MC
5765
5766 udelay(100);
5767
bf5295bb
MC
5768 REG_WR(bp, BNX2_HC_COMMAND,
5769 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5770
b6016b76
MC
5771 REG_RD(bp, BNX2_HC_COMMAND);
5772
5773 udelay(5);
5774
e95524a7 5775 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
745720e5 5776 dev_kfree_skb(skb);
b6016b76 5777
35e9010b 5778 if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
b6016b76 5779 goto loopback_test_done;
b6016b76 5780
35efa7c1 5781 rx_idx = bnx2_get_hw_rx_cons(bnapi);
b6016b76
MC
5782 if (rx_idx != rx_start_idx + num_pkts) {
5783 goto loopback_test_done;
5784 }
5785
bb4f98ab 5786 rx_buf = &rxr->rx_buf_ring[rx_start_idx];
b6016b76
MC
5787 rx_skb = rx_buf->skb;
5788
a33fa66b 5789 rx_hdr = rx_buf->desc;
d89cb6af 5790 skb_reserve(rx_skb, BNX2_RX_OFFSET);
b6016b76
MC
5791
5792 pci_dma_sync_single_for_cpu(bp->pdev,
1a4ccc2d 5793 dma_unmap_addr(rx_buf, mapping),
b6016b76
MC
5794 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5795
ade2bfe7 5796 if (rx_hdr->l2_fhdr_status &
b6016b76
MC
5797 (L2_FHDR_ERRORS_BAD_CRC |
5798 L2_FHDR_ERRORS_PHY_DECODE |
5799 L2_FHDR_ERRORS_ALIGNMENT |
5800 L2_FHDR_ERRORS_TOO_SHORT |
5801 L2_FHDR_ERRORS_GIANT_FRAME)) {
5802
5803 goto loopback_test_done;
5804 }
5805
5806 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5807 goto loopback_test_done;
5808 }
5809
5810 for (i = 14; i < pkt_size; i++) {
5811 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5812 goto loopback_test_done;
5813 }
5814 }
5815
5816 ret = 0;
5817
5818loopback_test_done:
5819 bp->loopback = 0;
5820 return ret;
5821}
5822
bc5a0690
MC
5823#define BNX2_MAC_LOOPBACK_FAILED 1
5824#define BNX2_PHY_LOOPBACK_FAILED 2
5825#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5826 BNX2_PHY_LOOPBACK_FAILED)
5827
5828static int
5829bnx2_test_loopback(struct bnx2 *bp)
5830{
5831 int rc = 0;
5832
5833 if (!netif_running(bp->dev))
5834 return BNX2_LOOPBACK_FAILED;
5835
5836 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5837 spin_lock_bh(&bp->phy_lock);
9a120bc5 5838 bnx2_init_phy(bp, 1);
bc5a0690
MC
5839 spin_unlock_bh(&bp->phy_lock);
5840 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5841 rc |= BNX2_MAC_LOOPBACK_FAILED;
5842 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5843 rc |= BNX2_PHY_LOOPBACK_FAILED;
5844 return rc;
5845}
5846
b6016b76
MC
5847#define NVRAM_SIZE 0x200
5848#define CRC32_RESIDUAL 0xdebb20e3
5849
5850static int
5851bnx2_test_nvram(struct bnx2 *bp)
5852{
b491edd5 5853 __be32 buf[NVRAM_SIZE / 4];
b6016b76
MC
5854 u8 *data = (u8 *) buf;
5855 int rc = 0;
5856 u32 magic, csum;
5857
5858 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5859 goto test_nvram_done;
5860
5861 magic = be32_to_cpu(buf[0]);
5862 if (magic != 0x669955aa) {
5863 rc = -ENODEV;
5864 goto test_nvram_done;
5865 }
5866
5867 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5868 goto test_nvram_done;
5869
5870 csum = ether_crc_le(0x100, data);
5871 if (csum != CRC32_RESIDUAL) {
5872 rc = -ENODEV;
5873 goto test_nvram_done;
5874 }
5875
5876 csum = ether_crc_le(0x100, data + 0x100);
5877 if (csum != CRC32_RESIDUAL) {
5878 rc = -ENODEV;
5879 }
5880
5881test_nvram_done:
5882 return rc;
5883}
5884
5885static int
5886bnx2_test_link(struct bnx2 *bp)
5887{
5888 u32 bmsr;
5889
9f52b564
MC
5890 if (!netif_running(bp->dev))
5891 return -ENODEV;
5892
583c28e5 5893 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
489310a4
MC
5894 if (bp->link_up)
5895 return 0;
5896 return -ENODEV;
5897 }
c770a65c 5898 spin_lock_bh(&bp->phy_lock);
27a005b8
MC
5899 bnx2_enable_bmsr1(bp);
5900 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5901 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5902 bnx2_disable_bmsr1(bp);
c770a65c 5903 spin_unlock_bh(&bp->phy_lock);
6aa20a22 5904
b6016b76
MC
5905 if (bmsr & BMSR_LSTATUS) {
5906 return 0;
5907 }
5908 return -ENODEV;
5909}
5910
5911static int
5912bnx2_test_intr(struct bnx2 *bp)
5913{
5914 int i;
b6016b76
MC
5915 u16 status_idx;
5916
5917 if (!netif_running(bp->dev))
5918 return -ENODEV;
5919
5920 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5921
5922 /* This register is not touched during run-time. */
bf5295bb 5923 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
b6016b76
MC
5924 REG_RD(bp, BNX2_HC_COMMAND);
5925
5926 for (i = 0; i < 10; i++) {
5927 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5928 status_idx) {
5929
5930 break;
5931 }
5932
5933 msleep_interruptible(10);
5934 }
5935 if (i < 10)
5936 return 0;
5937
5938 return -ENODEV;
5939}
5940
/* Determining link for parallel detection. */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	/* Select the MODE_CTL shadow register, then read it; no link
	 * without signal detect.
	 */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* Double read: presumably clears a latched status so the
	 * second read reflects current state — TODO confirm against
	 * the PHY datasheet.
	 */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
5972
/* Periodic SerDes handling for 5706: implements parallel detection
 * (force the link when the partner does not autonegotiate, re-enable
 * autoneg when it starts to) and monitors for loss of sync.  Runs
 * from the driver timer; takes phy_lock.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* An autoneg attempt is still settling; skip the link
		 * check this tick.
		 */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Partner is not autonegotiating but signal is
			 * good: force 1000/full (parallel detect).
			 */
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Link was forced by parallel detect; if the partner
		 * now sends autoneg, switch back to autonegotiation.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		/* Double read of the latched AN debug status. */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			/* Link claims up but sync is lost: force it
			 * down once, then let set_link re-evaluate.
			 */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
b6016b76 6034
/* Periodic SerDes handling for 5708: while link is down with autoneg
 * enabled, alternate between forced 2.5G mode and autonegotiation to
 * find a partner at either speed.  No-op for remote-managed PHYs or
 * parts without 2.5G capability.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Try forced 2.5G for a shortened interval. */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
		} else {
			/* Fall back to autoneg and give it two timer
			 * ticks to complete.
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = BNX2_TIMER_INTERVAL;
		}

	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	spin_unlock(&bp->phy_lock);
}
6067
/* Driver heartbeat timer: checks for missed MSI interrupts, sends the
 * firmware heartbeat, refreshes the firmware RX drop counter, works
 * around broken statistics, and runs the per-chip SerDes state
 * machines.  Re-arms itself at bp->current_interval.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	/* Interrupts are disabled (e.g. during reset); just re-arm. */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Missed-MSI workaround applies only to plain MSI, not the
	 * one-shot MSI variant.
	 */
	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
	     BNX2_FLAG_USING_MSI)
		bnx2_chk_missed_msi(bp);

	bnx2_send_heart_beat(bp);

	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
6103
8e6a72c4
MC
6104static int
6105bnx2_request_irq(struct bnx2 *bp)
6106{
6d866ffc 6107 unsigned long flags;
b4b36042
MC
6108 struct bnx2_irq *irq;
6109 int rc = 0, i;
8e6a72c4 6110
f86e82fb 6111 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6d866ffc
MC
6112 flags = 0;
6113 else
6114 flags = IRQF_SHARED;
b4b36042
MC
6115
6116 for (i = 0; i < bp->irq_nvecs; i++) {
6117 irq = &bp->irq_tbl[i];
c76c0475 6118 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
f0ea2e63 6119 &bp->bnx2_napi[i]);
b4b36042
MC
6120 if (rc)
6121 break;
6122 irq->requested = 1;
6123 }
8e6a72c4
MC
6124 return rc;
6125}
6126
/* Release every requested interrupt vector, then disable MSI/MSI-X
 * and clear the corresponding flags.
 */
static void
bnx2_free_irq(struct bnx2 *bp)
{
	struct bnx2_irq *irq;
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		irq = &bp->irq_tbl[i];
		if (irq->requested)
			free_irq(irq->vector, &bp->bnx2_napi[i]);
		irq->requested = 0;
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		pci_disable_msi(bp->pdev);
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		pci_disable_msix(bp->pdev);

	bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
}
6146
/* Try to switch the device to MSI-X with msix_vecs vectors.  On any
 * failure the function returns silently and the caller stays on
 * MSI/INTx.
 */
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
	int i, rc;
	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
	struct net_device *dev = bp->dev;
	const int len = sizeof(bp->irq_tbl[0].name);

	/* Program the chip's MSI-X table/PBA windows before enabling. */
	bnx2_setup_msix_tbl(bp);
	REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
	REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
	REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

	/* Need to flush the previous three writes to ensure MSI-X
	 * is setup properly */
	REG_RD(bp, BNX2_PCI_MSIX_CONTROL);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	/* All-or-nothing: a non-zero return leaves us on MSI/INTx. */
	rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
	if (rc != 0)
		return;

	bp->irq_nvecs = msix_vecs;
	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		bp->irq_tbl[i].vector = msix_ent[i].vector;
		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
		bp->irq_tbl[i].handler = bnx2_msi_1shot;
	}
}
6181
/* Pick the interrupt mode (MSI-X, MSI, or INTx) and size the TX/RX
 * ring counts accordingly.  @dis_msi forces legacy INTx.
 */
static void
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
	int cpus = num_online_cpus();
	/* One vector per CPU plus one, capped at the ring maximum. */
	int msix_vecs = min(cpus + 1, RX_MAX_RINGS);

	/* Default to a single INTx vector; overridden below. */
	bp->irq_tbl[0].handler = bnx2_interrupt;
	strcpy(bp->irq_tbl[0].name, bp->dev->name);
	bp->irq_nvecs = 1;
	bp->irq_tbl[0].vector = bp->pdev->irq;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
		bnx2_enable_msix(bp, msix_vecs);

	/* Fall back to single-vector MSI when MSI-X was not enabled. */
	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= BNX2_FLAG_USING_MSI;
			if (CHIP_NUM(bp) == CHIP_NUM_5709) {
				/* 5709 supports one-shot MSI. */
				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
				bp->irq_tbl[0].handler = bnx2_msi_1shot;
			} else
				bp->irq_tbl[0].handler = bnx2_msi;

			bp->irq_tbl[0].vector = bp->pdev->irq;
		}
	}

	/* TX ring count must be a power of 2 for the hashing logic. */
	bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
	bp->dev->real_num_tx_queues = bp->num_tx_rings;

	bp->num_rx_rings = bp->irq_nvecs;
}
6214}
6215
b6016b76
MC
6216/* Called with rtnl_lock */
6217static int
6218bnx2_open(struct net_device *dev)
6219{
972ec0d4 6220 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6221 int rc;
6222
1b2f922f
MC
6223 netif_carrier_off(dev);
6224
829ca9a3 6225 bnx2_set_power_state(bp, PCI_D0);
b6016b76
MC
6226 bnx2_disable_int(bp);
6227
35e9010b 6228 bnx2_setup_int_mode(bp, disable_msi);
4327ba43 6229 bnx2_init_napi(bp);
35e9010b 6230 bnx2_napi_enable(bp);
b6016b76 6231 rc = bnx2_alloc_mem(bp);
2739a8bb
MC
6232 if (rc)
6233 goto open_err;
b6016b76 6234
8e6a72c4 6235 rc = bnx2_request_irq(bp);
2739a8bb
MC
6236 if (rc)
6237 goto open_err;
b6016b76 6238
9a120bc5 6239 rc = bnx2_init_nic(bp, 1);
2739a8bb
MC
6240 if (rc)
6241 goto open_err;
6aa20a22 6242
cd339a0e 6243 mod_timer(&bp->timer, jiffies + bp->current_interval);
b6016b76
MC
6244
6245 atomic_set(&bp->intr_sem, 0);
6246
354fcd77
MC
6247 memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6248
b6016b76
MC
6249 bnx2_enable_int(bp);
6250
f86e82fb 6251 if (bp->flags & BNX2_FLAG_USING_MSI) {
b6016b76
MC
6252 /* Test MSI to make sure it is working
6253 * If MSI test fails, go back to INTx mode
6254 */
6255 if (bnx2_test_intr(bp) != 0) {
3a9c6a49 6256 netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
b6016b76
MC
6257
6258 bnx2_disable_int(bp);
8e6a72c4 6259 bnx2_free_irq(bp);
b6016b76 6260
6d866ffc
MC
6261 bnx2_setup_int_mode(bp, 1);
6262
9a120bc5 6263 rc = bnx2_init_nic(bp, 0);
b6016b76 6264
8e6a72c4
MC
6265 if (!rc)
6266 rc = bnx2_request_irq(bp);
6267
b6016b76 6268 if (rc) {
b6016b76 6269 del_timer_sync(&bp->timer);
2739a8bb 6270 goto open_err;
b6016b76
MC
6271 }
6272 bnx2_enable_int(bp);
6273 }
6274 }
f86e82fb 6275 if (bp->flags & BNX2_FLAG_USING_MSI)
3a9c6a49 6276 netdev_info(dev, "using MSI\n");
f86e82fb 6277 else if (bp->flags & BNX2_FLAG_USING_MSIX)
3a9c6a49 6278 netdev_info(dev, "using MSIX\n");
b6016b76 6279
706bf240 6280 netif_tx_start_all_queues(dev);
b6016b76
MC
6281
6282 return 0;
2739a8bb
MC
6283
6284open_err:
6285 bnx2_napi_disable(bp);
6286 bnx2_free_skbs(bp);
6287 bnx2_free_irq(bp);
6288 bnx2_free_mem(bp);
f048fa9c 6289 bnx2_del_napi(bp);
2739a8bb 6290 return rc;
b6016b76
MC
6291}
6292
6293static void
c4028958 6294bnx2_reset_task(struct work_struct *work)
b6016b76 6295{
c4028958 6296 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
b6016b76 6297
51bf6bb4
MC
6298 rtnl_lock();
6299 if (!netif_running(bp->dev)) {
6300 rtnl_unlock();
afdc08b9 6301 return;
51bf6bb4 6302 }
afdc08b9 6303
212f9934 6304 bnx2_netif_stop(bp, true);
b6016b76 6305
9a120bc5 6306 bnx2_init_nic(bp, 1);
b6016b76
MC
6307
6308 atomic_set(&bp->intr_sem, 1);
212f9934 6309 bnx2_netif_start(bp, true);
51bf6bb4 6310 rtnl_unlock();
b6016b76
MC
6311}
6312
20175c57
MC
6313static void
6314bnx2_dump_state(struct bnx2 *bp)
6315{
6316 struct net_device *dev = bp->dev;
5804a8fb
MC
6317 u32 mcp_p0, mcp_p1, val1, val2;
6318
6319 pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
6320 netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
6321 atomic_read(&bp->intr_sem), val1);
6322 pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
6323 pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
6324 netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
b98eba52 6325 netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
3a9c6a49 6326 REG_RD(bp, BNX2_EMAC_TX_STATUS),
b98eba52
EW
6327 REG_RD(bp, BNX2_EMAC_RX_STATUS));
6328 netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
3a9c6a49 6329 REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
b98eba52
EW
6330 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6331 mcp_p0 = BNX2_MCP_STATE_P0;
6332 mcp_p1 = BNX2_MCP_STATE_P1;
6333 } else {
6334 mcp_p0 = BNX2_MCP_STATE_P0_5708;
6335 mcp_p1 = BNX2_MCP_STATE_P1_5708;
6336 }
3a9c6a49 6337 netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
b98eba52 6338 bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
3a9c6a49
JP
6339 netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6340 REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
20175c57 6341 if (bp->flags & BNX2_FLAG_USING_MSIX)
3a9c6a49
JP
6342 netdev_err(dev, "DEBUG: PBA[%08x]\n",
6343 REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
20175c57
MC
6344}
6345
b6016b76
MC
6346static void
6347bnx2_tx_timeout(struct net_device *dev)
6348{
972ec0d4 6349 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6350
20175c57
MC
6351 bnx2_dump_state(bp);
6352
b6016b76
MC
6353 /* This allows the netif to be shutdown gracefully before resetting */
6354 schedule_work(&bp->reset_task);
6355}
6356
6357#ifdef BCM_VLAN
6358/* Called with rtnl_lock */
6359static void
6360bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
6361{
972ec0d4 6362 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6363
3767546c 6364 if (netif_running(dev))
212f9934 6365 bnx2_netif_stop(bp, false);
b6016b76
MC
6366
6367 bp->vlgrp = vlgrp;
3767546c
MC
6368
6369 if (!netif_running(dev))
6370 return;
6371
b6016b76 6372 bnx2_set_rx_mode(dev);
7c62e83b
MC
6373 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
6374 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
b6016b76 6375
212f9934 6376 bnx2_netif_start(bp, false);
b6016b76 6377}
b6016b76
MC
6378#endif
6379
932ff279 6380/* Called with netif_tx_lock.
2f8af120
MC
6381 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6382 * netif_wake_queue().
b6016b76 6383 */
61357325 6384static netdev_tx_t
b6016b76
MC
6385bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6386{
972ec0d4 6387 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6388 dma_addr_t mapping;
6389 struct tx_bd *txbd;
3d16af86 6390 struct sw_tx_bd *tx_buf;
b6016b76
MC
6391 u32 len, vlan_tag_flags, last_frag, mss;
6392 u16 prod, ring_prod;
6393 int i;
706bf240
BL
6394 struct bnx2_napi *bnapi;
6395 struct bnx2_tx_ring_info *txr;
6396 struct netdev_queue *txq;
6397
6398 /* Determine which tx ring we will be placed on */
6399 i = skb_get_queue_mapping(skb);
6400 bnapi = &bp->bnx2_napi[i];
6401 txr = &bnapi->tx_ring;
6402 txq = netdev_get_tx_queue(dev, i);
b6016b76 6403
35e9010b 6404 if (unlikely(bnx2_tx_avail(bp, txr) <
a550c99b 6405 (skb_shinfo(skb)->nr_frags + 1))) {
706bf240 6406 netif_tx_stop_queue(txq);
3a9c6a49 6407 netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
b6016b76
MC
6408
6409 return NETDEV_TX_BUSY;
6410 }
6411 len = skb_headlen(skb);
35e9010b 6412 prod = txr->tx_prod;
b6016b76
MC
6413 ring_prod = TX_RING_IDX(prod);
6414
6415 vlan_tag_flags = 0;
84fa7933 6416 if (skb->ip_summed == CHECKSUM_PARTIAL) {
b6016b76
MC
6417 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6418 }
6419
729b85cd 6420#ifdef BCM_VLAN
79ea13ce 6421 if (bp->vlgrp && vlan_tx_tag_present(skb)) {
b6016b76
MC
6422 vlan_tag_flags |=
6423 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6424 }
729b85cd 6425#endif
fde82055 6426 if ((mss = skb_shinfo(skb)->gso_size)) {
a1efb4b6 6427 u32 tcp_opt_len;
eddc9ec5 6428 struct iphdr *iph;
b6016b76 6429
b6016b76
MC
6430 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6431
4666f87a
MC
6432 tcp_opt_len = tcp_optlen(skb);
6433
6434 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6435 u32 tcp_off = skb_transport_offset(skb) -
6436 sizeof(struct ipv6hdr) - ETH_HLEN;
ab6a5bb6 6437
4666f87a
MC
6438 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6439 TX_BD_FLAGS_SW_FLAGS;
6440 if (likely(tcp_off == 0))
6441 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6442 else {
6443 tcp_off >>= 3;
6444 vlan_tag_flags |= ((tcp_off & 0x3) <<
6445 TX_BD_FLAGS_TCP6_OFF0_SHL) |
6446 ((tcp_off & 0x10) <<
6447 TX_BD_FLAGS_TCP6_OFF4_SHL);
6448 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6449 }
6450 } else {
4666f87a 6451 iph = ip_hdr(skb);
4666f87a
MC
6452 if (tcp_opt_len || (iph->ihl > 5)) {
6453 vlan_tag_flags |= ((iph->ihl - 5) +
6454 (tcp_opt_len >> 2)) << 8;
6455 }
b6016b76 6456 }
4666f87a 6457 } else
b6016b76 6458 mss = 0;
b6016b76 6459
e95524a7
AD
6460 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6461 if (pci_dma_mapping_error(bp->pdev, mapping)) {
3d16af86
BL
6462 dev_kfree_skb(skb);
6463 return NETDEV_TX_OK;
6464 }
6465
35e9010b 6466 tx_buf = &txr->tx_buf_ring[ring_prod];
b6016b76 6467 tx_buf->skb = skb;
1a4ccc2d 6468 dma_unmap_addr_set(tx_buf, mapping, mapping);
b6016b76 6469
35e9010b 6470 txbd = &txr->tx_desc_ring[ring_prod];
b6016b76
MC
6471
6472 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6473 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6474 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6475 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6476
6477 last_frag = skb_shinfo(skb)->nr_frags;
d62fda08
ED
6478 tx_buf->nr_frags = last_frag;
6479 tx_buf->is_gso = skb_is_gso(skb);
b6016b76
MC
6480
6481 for (i = 0; i < last_frag; i++) {
6482 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6483
6484 prod = NEXT_TX_BD(prod);
6485 ring_prod = TX_RING_IDX(prod);
35e9010b 6486 txbd = &txr->tx_desc_ring[ring_prod];
b6016b76
MC
6487
6488 len = frag->size;
e95524a7
AD
6489 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
6490 len, PCI_DMA_TODEVICE);
6491 if (pci_dma_mapping_error(bp->pdev, mapping))
6492 goto dma_error;
1a4ccc2d 6493 dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
e95524a7 6494 mapping);
b6016b76
MC
6495
6496 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6497 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6498 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6499 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6500
6501 }
6502 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6503
6504 prod = NEXT_TX_BD(prod);
35e9010b 6505 txr->tx_prod_bseq += skb->len;
b6016b76 6506
35e9010b
MC
6507 REG_WR16(bp, txr->tx_bidx_addr, prod);
6508 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
b6016b76
MC
6509
6510 mmiowb();
6511
35e9010b 6512 txr->tx_prod = prod;
b6016b76 6513
35e9010b 6514 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
706bf240 6515 netif_tx_stop_queue(txq);
35e9010b 6516 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
706bf240 6517 netif_tx_wake_queue(txq);
b6016b76
MC
6518 }
6519
e95524a7
AD
6520 return NETDEV_TX_OK;
6521dma_error:
6522 /* save value of frag that failed */
6523 last_frag = i;
6524
6525 /* start back at beginning and unmap skb */
6526 prod = txr->tx_prod;
6527 ring_prod = TX_RING_IDX(prod);
6528 tx_buf = &txr->tx_buf_ring[ring_prod];
6529 tx_buf->skb = NULL;
1a4ccc2d 6530 pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
e95524a7
AD
6531 skb_headlen(skb), PCI_DMA_TODEVICE);
6532
6533 /* unmap remaining mapped pages */
6534 for (i = 0; i < last_frag; i++) {
6535 prod = NEXT_TX_BD(prod);
6536 ring_prod = TX_RING_IDX(prod);
6537 tx_buf = &txr->tx_buf_ring[ring_prod];
1a4ccc2d 6538 pci_unmap_page(bp->pdev, dma_unmap_addr(tx_buf, mapping),
e95524a7
AD
6539 skb_shinfo(skb)->frags[i].size,
6540 PCI_DMA_TODEVICE);
6541 }
6542
6543 dev_kfree_skb(skb);
b6016b76
MC
6544 return NETDEV_TX_OK;
6545}
6546
6547/* Called with rtnl_lock */
6548static int
6549bnx2_close(struct net_device *dev)
6550{
972ec0d4 6551 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6552
4bb073c0 6553 cancel_work_sync(&bp->reset_task);
afdc08b9 6554
bea3348e 6555 bnx2_disable_int_sync(bp);
35efa7c1 6556 bnx2_napi_disable(bp);
b6016b76 6557 del_timer_sync(&bp->timer);
74bf4ba3 6558 bnx2_shutdown_chip(bp);
8e6a72c4 6559 bnx2_free_irq(bp);
b6016b76
MC
6560 bnx2_free_skbs(bp);
6561 bnx2_free_mem(bp);
f048fa9c 6562 bnx2_del_napi(bp);
b6016b76
MC
6563 bp->link_up = 0;
6564 netif_carrier_off(bp->dev);
829ca9a3 6565 bnx2_set_power_state(bp, PCI_D3hot);
b6016b76
MC
6566 return 0;
6567}
6568
354fcd77
MC
6569static void
6570bnx2_save_stats(struct bnx2 *bp)
6571{
6572 u32 *hw_stats = (u32 *) bp->stats_blk;
6573 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6574 int i;
6575
6576 /* The 1st 10 counters are 64-bit counters */
6577 for (i = 0; i < 20; i += 2) {
6578 u32 hi;
6579 u64 lo;
6580
c9885fe5
PR
6581 hi = temp_stats[i] + hw_stats[i];
6582 lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
354fcd77
MC
6583 if (lo > 0xffffffff)
6584 hi++;
c9885fe5
PR
6585 temp_stats[i] = hi;
6586 temp_stats[i + 1] = lo & 0xffffffff;
354fcd77
MC
6587 }
6588
6589 for ( ; i < sizeof(struct statistics_block) / 4; i++)
c9885fe5 6590 temp_stats[i] += hw_stats[i];
354fcd77
MC
6591}
6592
5d07bf26
ED
/* Combine a hi/lo pair of 32-bit hardware counter words into a u64. */
#define GET_64BIT_NET_STATS64(ctr)				\
	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))

/* Sum a 64-bit counter's live value with the copy saved across chip
 * resets.  The whole expansion is parenthesized so it is safe to embed
 * in larger expressions (the previous form expanded to an open
 * "a + b", relying on the call sites only ever adding it).
 */
#define GET_64BIT_NET_STATS(ctr)				\
	(GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +		\
	 GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr))

/* Same live+saved sum for a 32-bit counter, as an unsigned long. */
#define GET_32BIT_NET_STATS(ctr)				\
	((unsigned long) (bp->stats_blk->ctr +			\
			  bp->temp_stats_blk->ctr))
a4743058 6603
5d07bf26
ED
6604static struct rtnl_link_stats64 *
6605bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
b6016b76 6606{
972ec0d4 6607 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6608
5d07bf26 6609 if (bp->stats_blk == NULL)
b6016b76 6610 return net_stats;
5d07bf26 6611
b6016b76 6612 net_stats->rx_packets =
a4743058
MC
6613 GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6614 GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6615 GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
b6016b76
MC
6616
6617 net_stats->tx_packets =
a4743058
MC
6618 GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6619 GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6620 GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
b6016b76
MC
6621
6622 net_stats->rx_bytes =
a4743058 6623 GET_64BIT_NET_STATS(stat_IfHCInOctets);
b6016b76
MC
6624
6625 net_stats->tx_bytes =
a4743058 6626 GET_64BIT_NET_STATS(stat_IfHCOutOctets);
b6016b76 6627
6aa20a22 6628 net_stats->multicast =
a4743058 6629 GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts);
b6016b76 6630
6aa20a22 6631 net_stats->collisions =
a4743058 6632 GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
b6016b76 6633
6aa20a22 6634 net_stats->rx_length_errors =
a4743058
MC
6635 GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6636 GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
b6016b76 6637
6aa20a22 6638 net_stats->rx_over_errors =
a4743058
MC
6639 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6640 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
b6016b76 6641
6aa20a22 6642 net_stats->rx_frame_errors =
a4743058 6643 GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
b6016b76 6644
6aa20a22 6645 net_stats->rx_crc_errors =
a4743058 6646 GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
b6016b76
MC
6647
6648 net_stats->rx_errors = net_stats->rx_length_errors +
6649 net_stats->rx_over_errors + net_stats->rx_frame_errors +
6650 net_stats->rx_crc_errors;
6651
6652 net_stats->tx_aborted_errors =
a4743058
MC
6653 GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6654 GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
b6016b76 6655
5b0c76ad
MC
6656 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
6657 (CHIP_ID(bp) == CHIP_ID_5708_A0))
b6016b76
MC
6658 net_stats->tx_carrier_errors = 0;
6659 else {
6660 net_stats->tx_carrier_errors =
a4743058 6661 GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
b6016b76
MC
6662 }
6663
6664 net_stats->tx_errors =
a4743058 6665 GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
b6016b76
MC
6666 net_stats->tx_aborted_errors +
6667 net_stats->tx_carrier_errors;
6668
cea94db9 6669 net_stats->rx_missed_errors =
a4743058
MC
6670 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6671 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6672 GET_32BIT_NET_STATS(stat_FwRxDrop);
cea94db9 6673
b6016b76
MC
6674 return net_stats;
6675}
6676
6677/* All ethtool functions called with rtnl_lock */
6678
6679static int
6680bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6681{
972ec0d4 6682 struct bnx2 *bp = netdev_priv(dev);
7b6b8347 6683 int support_serdes = 0, support_copper = 0;
b6016b76
MC
6684
6685 cmd->supported = SUPPORTED_Autoneg;
583c28e5 6686 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
7b6b8347
MC
6687 support_serdes = 1;
6688 support_copper = 1;
6689 } else if (bp->phy_port == PORT_FIBRE)
6690 support_serdes = 1;
6691 else
6692 support_copper = 1;
6693
6694 if (support_serdes) {
b6016b76
MC
6695 cmd->supported |= SUPPORTED_1000baseT_Full |
6696 SUPPORTED_FIBRE;
583c28e5 6697 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
605a9e20 6698 cmd->supported |= SUPPORTED_2500baseX_Full;
b6016b76 6699
b6016b76 6700 }
7b6b8347 6701 if (support_copper) {
b6016b76
MC
6702 cmd->supported |= SUPPORTED_10baseT_Half |
6703 SUPPORTED_10baseT_Full |
6704 SUPPORTED_100baseT_Half |
6705 SUPPORTED_100baseT_Full |
6706 SUPPORTED_1000baseT_Full |
6707 SUPPORTED_TP;
6708
b6016b76
MC
6709 }
6710
7b6b8347
MC
6711 spin_lock_bh(&bp->phy_lock);
6712 cmd->port = bp->phy_port;
b6016b76
MC
6713 cmd->advertising = bp->advertising;
6714
6715 if (bp->autoneg & AUTONEG_SPEED) {
6716 cmd->autoneg = AUTONEG_ENABLE;
6717 }
6718 else {
6719 cmd->autoneg = AUTONEG_DISABLE;
6720 }
6721
6722 if (netif_carrier_ok(dev)) {
6723 cmd->speed = bp->line_speed;
6724 cmd->duplex = bp->duplex;
6725 }
6726 else {
6727 cmd->speed = -1;
6728 cmd->duplex = -1;
6729 }
7b6b8347 6730 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
6731
6732 cmd->transceiver = XCVR_INTERNAL;
6733 cmd->phy_address = bp->phy_addr;
6734
6735 return 0;
6736}
6aa20a22 6737
b6016b76
MC
6738static int
6739bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6740{
972ec0d4 6741 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6742 u8 autoneg = bp->autoneg;
6743 u8 req_duplex = bp->req_duplex;
6744 u16 req_line_speed = bp->req_line_speed;
6745 u32 advertising = bp->advertising;
7b6b8347
MC
6746 int err = -EINVAL;
6747
6748 spin_lock_bh(&bp->phy_lock);
6749
6750 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6751 goto err_out_unlock;
6752
583c28e5
MC
6753 if (cmd->port != bp->phy_port &&
6754 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
7b6b8347 6755 goto err_out_unlock;
b6016b76 6756
d6b14486
MC
6757 /* If device is down, we can store the settings only if the user
6758 * is setting the currently active port.
6759 */
6760 if (!netif_running(dev) && cmd->port != bp->phy_port)
6761 goto err_out_unlock;
6762
b6016b76
MC
6763 if (cmd->autoneg == AUTONEG_ENABLE) {
6764 autoneg |= AUTONEG_SPEED;
6765
beb499af
MC
6766 advertising = cmd->advertising;
6767 if (cmd->port == PORT_TP) {
6768 advertising &= ETHTOOL_ALL_COPPER_SPEED;
6769 if (!advertising)
b6016b76 6770 advertising = ETHTOOL_ALL_COPPER_SPEED;
beb499af
MC
6771 } else {
6772 advertising &= ETHTOOL_ALL_FIBRE_SPEED;
6773 if (!advertising)
6774 advertising = ETHTOOL_ALL_FIBRE_SPEED;
b6016b76
MC
6775 }
6776 advertising |= ADVERTISED_Autoneg;
6777 }
6778 else {
7b6b8347 6779 if (cmd->port == PORT_FIBRE) {
80be4434
MC
6780 if ((cmd->speed != SPEED_1000 &&
6781 cmd->speed != SPEED_2500) ||
6782 (cmd->duplex != DUPLEX_FULL))
7b6b8347 6783 goto err_out_unlock;
80be4434
MC
6784
6785 if (cmd->speed == SPEED_2500 &&
583c28e5 6786 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
7b6b8347 6787 goto err_out_unlock;
b6016b76 6788 }
7b6b8347
MC
6789 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
6790 goto err_out_unlock;
6791
b6016b76
MC
6792 autoneg &= ~AUTONEG_SPEED;
6793 req_line_speed = cmd->speed;
6794 req_duplex = cmd->duplex;
6795 advertising = 0;
6796 }
6797
6798 bp->autoneg = autoneg;
6799 bp->advertising = advertising;
6800 bp->req_line_speed = req_line_speed;
6801 bp->req_duplex = req_duplex;
6802
d6b14486
MC
6803 err = 0;
6804 /* If device is down, the new settings will be picked up when it is
6805 * brought up.
6806 */
6807 if (netif_running(dev))
6808 err = bnx2_setup_phy(bp, cmd->port);
b6016b76 6809
7b6b8347 6810err_out_unlock:
c770a65c 6811 spin_unlock_bh(&bp->phy_lock);
b6016b76 6812
7b6b8347 6813 return err;
b6016b76
MC
6814}
6815
6816static void
6817bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6818{
972ec0d4 6819 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6820
6821 strcpy(info->driver, DRV_MODULE_NAME);
6822 strcpy(info->version, DRV_MODULE_VERSION);
6823 strcpy(info->bus_info, pci_name(bp->pdev));
58fc2ea4 6824 strcpy(info->fw_version, bp->fw_version);
b6016b76
MC
6825}
6826
244ac4f4
MC
/* Size of the ethtool register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN		(32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
6834
6835static void
6836bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6837{
6838 u32 *p = _p, i, offset;
6839 u8 *orig_p = _p;
6840 struct bnx2 *bp = netdev_priv(dev);
6841 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6842 0x0800, 0x0880, 0x0c00, 0x0c10,
6843 0x0c30, 0x0d08, 0x1000, 0x101c,
6844 0x1040, 0x1048, 0x1080, 0x10a4,
6845 0x1400, 0x1490, 0x1498, 0x14f0,
6846 0x1500, 0x155c, 0x1580, 0x15dc,
6847 0x1600, 0x1658, 0x1680, 0x16d8,
6848 0x1800, 0x1820, 0x1840, 0x1854,
6849 0x1880, 0x1894, 0x1900, 0x1984,
6850 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6851 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6852 0x2000, 0x2030, 0x23c0, 0x2400,
6853 0x2800, 0x2820, 0x2830, 0x2850,
6854 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6855 0x3c00, 0x3c94, 0x4000, 0x4010,
6856 0x4080, 0x4090, 0x43c0, 0x4458,
6857 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6858 0x4fc0, 0x5010, 0x53c0, 0x5444,
6859 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6860 0x5fc0, 0x6000, 0x6400, 0x6428,
6861 0x6800, 0x6848, 0x684c, 0x6860,
6862 0x6888, 0x6910, 0x8000 };
6863
6864 regs->version = 0;
6865
6866 memset(p, 0, BNX2_REGDUMP_LEN);
6867
6868 if (!netif_running(bp->dev))
6869 return;
6870
6871 i = 0;
6872 offset = reg_boundaries[0];
6873 p += offset;
6874 while (offset < BNX2_REGDUMP_LEN) {
6875 *p++ = REG_RD(bp, offset);
6876 offset += 4;
6877 if (offset == reg_boundaries[i + 1]) {
6878 offset = reg_boundaries[i + 2];
6879 p = (u32 *) (orig_p + offset);
6880 i += 2;
6881 }
6882 }
6883}
6884
b6016b76
MC
6885static void
6886bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6887{
972ec0d4 6888 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6889
f86e82fb 6890 if (bp->flags & BNX2_FLAG_NO_WOL) {
b6016b76
MC
6891 wol->supported = 0;
6892 wol->wolopts = 0;
6893 }
6894 else {
6895 wol->supported = WAKE_MAGIC;
6896 if (bp->wol)
6897 wol->wolopts = WAKE_MAGIC;
6898 else
6899 wol->wolopts = 0;
6900 }
6901 memset(&wol->sopass, 0, sizeof(wol->sopass));
6902}
6903
6904static int
6905bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6906{
972ec0d4 6907 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6908
6909 if (wol->wolopts & ~WAKE_MAGIC)
6910 return -EINVAL;
6911
6912 if (wol->wolopts & WAKE_MAGIC) {
f86e82fb 6913 if (bp->flags & BNX2_FLAG_NO_WOL)
b6016b76
MC
6914 return -EINVAL;
6915
6916 bp->wol = 1;
6917 }
6918 else {
6919 bp->wol = 0;
6920 }
6921 return 0;
6922}
6923
6924static int
6925bnx2_nway_reset(struct net_device *dev)
6926{
972ec0d4 6927 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6928 u32 bmcr;
6929
9f52b564
MC
6930 if (!netif_running(dev))
6931 return -EAGAIN;
6932
b6016b76
MC
6933 if (!(bp->autoneg & AUTONEG_SPEED)) {
6934 return -EINVAL;
6935 }
6936
c770a65c 6937 spin_lock_bh(&bp->phy_lock);
b6016b76 6938
583c28e5 6939 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
7b6b8347
MC
6940 int rc;
6941
6942 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
6943 spin_unlock_bh(&bp->phy_lock);
6944 return rc;
6945 }
6946
b6016b76 6947 /* Force a link down visible on the other side */
583c28e5 6948 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
ca58c3af 6949 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
c770a65c 6950 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
6951
6952 msleep(20);
6953
c770a65c 6954 spin_lock_bh(&bp->phy_lock);
f8dd064e 6955
40105c0b 6956 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
f8dd064e
MC
6957 bp->serdes_an_pending = 1;
6958 mod_timer(&bp->timer, jiffies + bp->current_interval);
b6016b76
MC
6959 }
6960
ca58c3af 6961 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76 6962 bmcr &= ~BMCR_LOOPBACK;
ca58c3af 6963 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
b6016b76 6964
c770a65c 6965 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
6966
6967 return 0;
6968}
6969
7959ea25
ON
6970static u32
6971bnx2_get_link(struct net_device *dev)
6972{
6973 struct bnx2 *bp = netdev_priv(dev);
6974
6975 return bp->link_up;
6976}
6977
b6016b76
MC
6978static int
6979bnx2_get_eeprom_len(struct net_device *dev)
6980{
972ec0d4 6981 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6982
1122db71 6983 if (bp->flash_info == NULL)
b6016b76
MC
6984 return 0;
6985
1122db71 6986 return (int) bp->flash_size;
b6016b76
MC
6987}
6988
6989static int
6990bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6991 u8 *eebuf)
6992{
972ec0d4 6993 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6994 int rc;
6995
9f52b564
MC
6996 if (!netif_running(dev))
6997 return -EAGAIN;
6998
1064e944 6999 /* parameters already validated in ethtool_get_eeprom */
b6016b76
MC
7000
7001 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7002
7003 return rc;
7004}
7005
7006static int
7007bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7008 u8 *eebuf)
7009{
972ec0d4 7010 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7011 int rc;
7012
9f52b564
MC
7013 if (!netif_running(dev))
7014 return -EAGAIN;
7015
1064e944 7016 /* parameters already validated in ethtool_set_eeprom */
b6016b76
MC
7017
7018 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7019
7020 return rc;
7021}
7022
7023static int
7024bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7025{
972ec0d4 7026 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7027
7028 memset(coal, 0, sizeof(struct ethtool_coalesce));
7029
7030 coal->rx_coalesce_usecs = bp->rx_ticks;
7031 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7032 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7033 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7034
7035 coal->tx_coalesce_usecs = bp->tx_ticks;
7036 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7037 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7038 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7039
7040 coal->stats_block_coalesce_usecs = bp->stats_ticks;
7041
7042 return 0;
7043}
7044
7045static int
7046bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7047{
972ec0d4 7048 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7049
7050 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7051 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7052
6aa20a22 7053 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
b6016b76
MC
7054 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7055
7056 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7057 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7058
7059 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7060 if (bp->rx_quick_cons_trip_int > 0xff)
7061 bp->rx_quick_cons_trip_int = 0xff;
7062
7063 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7064 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7065
7066 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7067 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7068
7069 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7070 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7071
7072 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7073 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7074 0xff;
7075
7076 bp->stats_ticks = coal->stats_block_coalesce_usecs;
61d9e3fa 7077 if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
02537b06
MC
7078 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7079 bp->stats_ticks = USEC_PER_SEC;
7080 }
7ea6920e
MC
7081 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7082 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7083 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
b6016b76
MC
7084
7085 if (netif_running(bp->dev)) {
212f9934 7086 bnx2_netif_stop(bp, true);
9a120bc5 7087 bnx2_init_nic(bp, 0);
212f9934 7088 bnx2_netif_start(bp, true);
b6016b76
MC
7089 }
7090
7091 return 0;
7092}
7093
7094static void
7095bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7096{
972ec0d4 7097 struct bnx2 *bp = netdev_priv(dev);
b6016b76 7098
13daffa2 7099 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
b6016b76 7100 ering->rx_mini_max_pending = 0;
47bf4246 7101 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
b6016b76
MC
7102
7103 ering->rx_pending = bp->rx_ring_size;
7104 ering->rx_mini_pending = 0;
47bf4246 7105 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
b6016b76
MC
7106
7107 ering->tx_max_pending = MAX_TX_DESC_CNT;
7108 ering->tx_pending = bp->tx_ring_size;
7109}
7110
7111static int
5d5d0015 7112bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
b6016b76 7113{
13daffa2 7114 if (netif_running(bp->dev)) {
354fcd77
MC
7115 /* Reset will erase chipset stats; save them */
7116 bnx2_save_stats(bp);
7117
212f9934 7118 bnx2_netif_stop(bp, true);
13daffa2
MC
7119 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7120 bnx2_free_skbs(bp);
7121 bnx2_free_mem(bp);
7122 }
7123
5d5d0015
MC
7124 bnx2_set_rx_ring_size(bp, rx);
7125 bp->tx_ring_size = tx;
b6016b76
MC
7126
7127 if (netif_running(bp->dev)) {
13daffa2
MC
7128 int rc;
7129
7130 rc = bnx2_alloc_mem(bp);
6fefb65e
MC
7131 if (!rc)
7132 rc = bnx2_init_nic(bp, 0);
7133
7134 if (rc) {
7135 bnx2_napi_enable(bp);
7136 dev_close(bp->dev);
13daffa2 7137 return rc;
6fefb65e 7138 }
e9f26c49
MC
7139#ifdef BCM_CNIC
7140 mutex_lock(&bp->cnic_lock);
7141 /* Let cnic know about the new status block. */
7142 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
7143 bnx2_setup_cnic_irq_info(bp);
7144 mutex_unlock(&bp->cnic_lock);
7145#endif
212f9934 7146 bnx2_netif_start(bp, true);
b6016b76 7147 }
b6016b76
MC
7148 return 0;
7149}
7150
5d5d0015
MC
7151static int
7152bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7153{
7154 struct bnx2 *bp = netdev_priv(dev);
7155 int rc;
7156
7157 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7158 (ering->tx_pending > MAX_TX_DESC_CNT) ||
7159 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7160
7161 return -EINVAL;
7162 }
7163 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
7164 return rc;
7165}
7166
b6016b76
MC
7167static void
7168bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7169{
972ec0d4 7170 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7171
7172 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7173 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7174 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7175}
7176
7177static int
7178bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7179{
972ec0d4 7180 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7181
7182 bp->req_flow_ctrl = 0;
7183 if (epause->rx_pause)
7184 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7185 if (epause->tx_pause)
7186 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7187
7188 if (epause->autoneg) {
7189 bp->autoneg |= AUTONEG_FLOW_CTRL;
7190 }
7191 else {
7192 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7193 }
7194
9f52b564
MC
7195 if (netif_running(dev)) {
7196 spin_lock_bh(&bp->phy_lock);
7197 bnx2_setup_phy(bp, bp->phy_port);
7198 spin_unlock_bh(&bp->phy_lock);
7199 }
b6016b76
MC
7200
7201 return 0;
7202}
7203
7204static u32
7205bnx2_get_rx_csum(struct net_device *dev)
7206{
972ec0d4 7207 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7208
7209 return bp->rx_csum;
7210}
7211
7212static int
7213bnx2_set_rx_csum(struct net_device *dev, u32 data)
7214{
972ec0d4 7215 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7216
7217 bp->rx_csum = data;
7218 return 0;
7219}
7220
b11d6213
MC
7221static int
7222bnx2_set_tso(struct net_device *dev, u32 data)
7223{
4666f87a
MC
7224 struct bnx2 *bp = netdev_priv(dev);
7225
7226 if (data) {
b11d6213 7227 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
4666f87a
MC
7228 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7229 dev->features |= NETIF_F_TSO6;
7230 } else
7231 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
7232 NETIF_F_TSO_ECN);
b11d6213
MC
7233 return 0;
7234}
7235
14ab9b86 7236static struct {
b6016b76 7237 char string[ETH_GSTRING_LEN];
790dab2f 7238} bnx2_stats_str_arr[] = {
b6016b76
MC
7239 { "rx_bytes" },
7240 { "rx_error_bytes" },
7241 { "tx_bytes" },
7242 { "tx_error_bytes" },
7243 { "rx_ucast_packets" },
7244 { "rx_mcast_packets" },
7245 { "rx_bcast_packets" },
7246 { "tx_ucast_packets" },
7247 { "tx_mcast_packets" },
7248 { "tx_bcast_packets" },
7249 { "tx_mac_errors" },
7250 { "tx_carrier_errors" },
7251 { "rx_crc_errors" },
7252 { "rx_align_errors" },
7253 { "tx_single_collisions" },
7254 { "tx_multi_collisions" },
7255 { "tx_deferred" },
7256 { "tx_excess_collisions" },
7257 { "tx_late_collisions" },
7258 { "tx_total_collisions" },
7259 { "rx_fragments" },
7260 { "rx_jabbers" },
7261 { "rx_undersize_packets" },
7262 { "rx_oversize_packets" },
7263 { "rx_64_byte_packets" },
7264 { "rx_65_to_127_byte_packets" },
7265 { "rx_128_to_255_byte_packets" },
7266 { "rx_256_to_511_byte_packets" },
7267 { "rx_512_to_1023_byte_packets" },
7268 { "rx_1024_to_1522_byte_packets" },
7269 { "rx_1523_to_9022_byte_packets" },
7270 { "tx_64_byte_packets" },
7271 { "tx_65_to_127_byte_packets" },
7272 { "tx_128_to_255_byte_packets" },
7273 { "tx_256_to_511_byte_packets" },
7274 { "tx_512_to_1023_byte_packets" },
7275 { "tx_1024_to_1522_byte_packets" },
7276 { "tx_1523_to_9022_byte_packets" },
7277 { "rx_xon_frames" },
7278 { "rx_xoff_frames" },
7279 { "tx_xon_frames" },
7280 { "tx_xoff_frames" },
7281 { "rx_mac_ctrl_frames" },
7282 { "rx_filtered_packets" },
790dab2f 7283 { "rx_ftq_discards" },
b6016b76 7284 { "rx_discards" },
cea94db9 7285 { "rx_fw_discards" },
b6016b76
MC
7286};
7287
790dab2f
MC
7288#define BNX2_NUM_STATS (sizeof(bnx2_stats_str_arr)/\
7289 sizeof(bnx2_stats_str_arr[0]))
7290
b6016b76
MC
7291#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7292
f71e1309 7293static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
b6016b76
MC
7294 STATS_OFFSET32(stat_IfHCInOctets_hi),
7295 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7296 STATS_OFFSET32(stat_IfHCOutOctets_hi),
7297 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7298 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7299 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7300 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7301 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7302 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7303 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7304 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
6aa20a22
JG
7305 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7306 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7307 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7308 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7309 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7310 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7311 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7312 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7313 STATS_OFFSET32(stat_EtherStatsCollisions),
7314 STATS_OFFSET32(stat_EtherStatsFragments),
7315 STATS_OFFSET32(stat_EtherStatsJabbers),
7316 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7317 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7318 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7319 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7320 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7321 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7322 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7323 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7324 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7325 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7326 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7327 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7328 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7329 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7330 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7331 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7332 STATS_OFFSET32(stat_XonPauseFramesReceived),
7333 STATS_OFFSET32(stat_XoffPauseFramesReceived),
7334 STATS_OFFSET32(stat_OutXonSent),
7335 STATS_OFFSET32(stat_OutXoffSent),
7336 STATS_OFFSET32(stat_MacControlFramesReceived),
7337 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
790dab2f 7338 STATS_OFFSET32(stat_IfInFTQDiscards),
6aa20a22 7339 STATS_OFFSET32(stat_IfInMBUFDiscards),
cea94db9 7340 STATS_OFFSET32(stat_FwRxDrop),
b6016b76
MC
7341};
7342
7343/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
7344 * skipped because of errata.
6aa20a22 7345 */
14ab9b86 7346static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
b6016b76
MC
7347 8,0,8,8,8,8,8,8,8,8,
7348 4,0,4,4,4,4,4,4,4,4,
7349 4,4,4,4,4,4,4,4,4,4,
7350 4,4,4,4,4,4,4,4,4,4,
790dab2f 7351 4,4,4,4,4,4,4,
b6016b76
MC
7352};
7353
5b0c76ad
MC
7354static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7355 8,0,8,8,8,8,8,8,8,8,
7356 4,4,4,4,4,4,4,4,4,4,
7357 4,4,4,4,4,4,4,4,4,4,
7358 4,4,4,4,4,4,4,4,4,4,
790dab2f 7359 4,4,4,4,4,4,4,
5b0c76ad
MC
7360};
7361
b6016b76
MC
7362#define BNX2_NUM_TESTS 6
7363
14ab9b86 7364static struct {
b6016b76
MC
7365 char string[ETH_GSTRING_LEN];
7366} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7367 { "register_test (offline)" },
7368 { "memory_test (offline)" },
7369 { "loopback_test (offline)" },
7370 { "nvram_test (online)" },
7371 { "interrupt_test (online)" },
7372 { "link_test (online)" },
7373};
7374
7375static int
b9f2c044 7376bnx2_get_sset_count(struct net_device *dev, int sset)
b6016b76 7377{
b9f2c044
JG
7378 switch (sset) {
7379 case ETH_SS_TEST:
7380 return BNX2_NUM_TESTS;
7381 case ETH_SS_STATS:
7382 return BNX2_NUM_STATS;
7383 default:
7384 return -EOPNOTSUPP;
7385 }
b6016b76
MC
7386}
7387
7388static void
7389bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7390{
972ec0d4 7391 struct bnx2 *bp = netdev_priv(dev);
b6016b76 7392
9f52b564
MC
7393 bnx2_set_power_state(bp, PCI_D0);
7394
b6016b76
MC
7395 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7396 if (etest->flags & ETH_TEST_FL_OFFLINE) {
80be4434
MC
7397 int i;
7398
212f9934 7399 bnx2_netif_stop(bp, true);
b6016b76
MC
7400 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7401 bnx2_free_skbs(bp);
7402
7403 if (bnx2_test_registers(bp) != 0) {
7404 buf[0] = 1;
7405 etest->flags |= ETH_TEST_FL_FAILED;
7406 }
7407 if (bnx2_test_memory(bp) != 0) {
7408 buf[1] = 1;
7409 etest->flags |= ETH_TEST_FL_FAILED;
7410 }
bc5a0690 7411 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
b6016b76 7412 etest->flags |= ETH_TEST_FL_FAILED;
b6016b76 7413
9f52b564
MC
7414 if (!netif_running(bp->dev))
7415 bnx2_shutdown_chip(bp);
b6016b76 7416 else {
9a120bc5 7417 bnx2_init_nic(bp, 1);
212f9934 7418 bnx2_netif_start(bp, true);
b6016b76
MC
7419 }
7420
7421 /* wait for link up */
80be4434
MC
7422 for (i = 0; i < 7; i++) {
7423 if (bp->link_up)
7424 break;
7425 msleep_interruptible(1000);
7426 }
b6016b76
MC
7427 }
7428
7429 if (bnx2_test_nvram(bp) != 0) {
7430 buf[3] = 1;
7431 etest->flags |= ETH_TEST_FL_FAILED;
7432 }
7433 if (bnx2_test_intr(bp) != 0) {
7434 buf[4] = 1;
7435 etest->flags |= ETH_TEST_FL_FAILED;
7436 }
7437
7438 if (bnx2_test_link(bp) != 0) {
7439 buf[5] = 1;
7440 etest->flags |= ETH_TEST_FL_FAILED;
7441
7442 }
9f52b564
MC
7443 if (!netif_running(bp->dev))
7444 bnx2_set_power_state(bp, PCI_D3hot);
b6016b76
MC
7445}
7446
7447static void
7448bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7449{
7450 switch (stringset) {
7451 case ETH_SS_STATS:
7452 memcpy(buf, bnx2_stats_str_arr,
7453 sizeof(bnx2_stats_str_arr));
7454 break;
7455 case ETH_SS_TEST:
7456 memcpy(buf, bnx2_tests_str_arr,
7457 sizeof(bnx2_tests_str_arr));
7458 break;
7459 }
7460}
7461
b6016b76
MC
7462static void
7463bnx2_get_ethtool_stats(struct net_device *dev,
7464 struct ethtool_stats *stats, u64 *buf)
7465{
972ec0d4 7466 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7467 int i;
7468 u32 *hw_stats = (u32 *) bp->stats_blk;
354fcd77 7469 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
14ab9b86 7470 u8 *stats_len_arr = NULL;
b6016b76
MC
7471
7472 if (hw_stats == NULL) {
7473 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7474 return;
7475 }
7476
5b0c76ad
MC
7477 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
7478 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
7479 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
7480 (CHIP_ID(bp) == CHIP_ID_5708_A0))
b6016b76 7481 stats_len_arr = bnx2_5706_stats_len_arr;
5b0c76ad
MC
7482 else
7483 stats_len_arr = bnx2_5708_stats_len_arr;
b6016b76
MC
7484
7485 for (i = 0; i < BNX2_NUM_STATS; i++) {
354fcd77
MC
7486 unsigned long offset;
7487
b6016b76
MC
7488 if (stats_len_arr[i] == 0) {
7489 /* skip this counter */
7490 buf[i] = 0;
7491 continue;
7492 }
354fcd77
MC
7493
7494 offset = bnx2_stats_offset_arr[i];
b6016b76
MC
7495 if (stats_len_arr[i] == 4) {
7496 /* 4-byte counter */
354fcd77
MC
7497 buf[i] = (u64) *(hw_stats + offset) +
7498 *(temp_stats + offset);
b6016b76
MC
7499 continue;
7500 }
7501 /* 8-byte counter */
354fcd77
MC
7502 buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7503 *(hw_stats + offset + 1) +
7504 (((u64) *(temp_stats + offset)) << 32) +
7505 *(temp_stats + offset + 1);
b6016b76
MC
7506 }
7507}
7508
7509static int
7510bnx2_phys_id(struct net_device *dev, u32 data)
7511{
972ec0d4 7512 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7513 int i;
7514 u32 save;
7515
9f52b564
MC
7516 bnx2_set_power_state(bp, PCI_D0);
7517
b6016b76
MC
7518 if (data == 0)
7519 data = 2;
7520
7521 save = REG_RD(bp, BNX2_MISC_CFG);
7522 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7523
7524 for (i = 0; i < (data * 2); i++) {
7525 if ((i % 2) == 0) {
7526 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7527 }
7528 else {
7529 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7530 BNX2_EMAC_LED_1000MB_OVERRIDE |
7531 BNX2_EMAC_LED_100MB_OVERRIDE |
7532 BNX2_EMAC_LED_10MB_OVERRIDE |
7533 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7534 BNX2_EMAC_LED_TRAFFIC);
7535 }
7536 msleep_interruptible(500);
7537 if (signal_pending(current))
7538 break;
7539 }
7540 REG_WR(bp, BNX2_EMAC_LED, 0);
7541 REG_WR(bp, BNX2_MISC_CFG, save);
9f52b564
MC
7542
7543 if (!netif_running(dev))
7544 bnx2_set_power_state(bp, PCI_D3hot);
7545
b6016b76
MC
7546 return 0;
7547}
7548
4666f87a
MC
7549static int
7550bnx2_set_tx_csum(struct net_device *dev, u32 data)
7551{
7552 struct bnx2 *bp = netdev_priv(dev);
7553
7554 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6460d948 7555 return (ethtool_op_set_tx_ipv6_csum(dev, data));
4666f87a
MC
7556 else
7557 return (ethtool_op_set_tx_csum(dev, data));
7558}
7559
fdc8541d
MC
7560static int
7561bnx2_set_flags(struct net_device *dev, u32 data)
7562{
7563 return ethtool_op_set_flags(dev, data, ETH_FLAG_RXHASH);
7564}
7565
7282d491 7566static const struct ethtool_ops bnx2_ethtool_ops = {
b6016b76
MC
7567 .get_settings = bnx2_get_settings,
7568 .set_settings = bnx2_set_settings,
7569 .get_drvinfo = bnx2_get_drvinfo,
244ac4f4
MC
7570 .get_regs_len = bnx2_get_regs_len,
7571 .get_regs = bnx2_get_regs,
b6016b76
MC
7572 .get_wol = bnx2_get_wol,
7573 .set_wol = bnx2_set_wol,
7574 .nway_reset = bnx2_nway_reset,
7959ea25 7575 .get_link = bnx2_get_link,
b6016b76
MC
7576 .get_eeprom_len = bnx2_get_eeprom_len,
7577 .get_eeprom = bnx2_get_eeprom,
7578 .set_eeprom = bnx2_set_eeprom,
7579 .get_coalesce = bnx2_get_coalesce,
7580 .set_coalesce = bnx2_set_coalesce,
7581 .get_ringparam = bnx2_get_ringparam,
7582 .set_ringparam = bnx2_set_ringparam,
7583 .get_pauseparam = bnx2_get_pauseparam,
7584 .set_pauseparam = bnx2_set_pauseparam,
7585 .get_rx_csum = bnx2_get_rx_csum,
7586 .set_rx_csum = bnx2_set_rx_csum,
4666f87a 7587 .set_tx_csum = bnx2_set_tx_csum,
b6016b76 7588 .set_sg = ethtool_op_set_sg,
b11d6213 7589 .set_tso = bnx2_set_tso,
b6016b76
MC
7590 .self_test = bnx2_self_test,
7591 .get_strings = bnx2_get_strings,
7592 .phys_id = bnx2_phys_id,
b6016b76 7593 .get_ethtool_stats = bnx2_get_ethtool_stats,
b9f2c044 7594 .get_sset_count = bnx2_get_sset_count,
fdc8541d
MC
7595 .set_flags = bnx2_set_flags,
7596 .get_flags = ethtool_op_get_flags,
b6016b76
MC
7597};
7598
7599/* Called with rtnl_lock */
7600static int
7601bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7602{
14ab9b86 7603 struct mii_ioctl_data *data = if_mii(ifr);
972ec0d4 7604 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7605 int err;
7606
7607 switch(cmd) {
7608 case SIOCGMIIPHY:
7609 data->phy_id = bp->phy_addr;
7610
7611 /* fallthru */
7612 case SIOCGMIIREG: {
7613 u32 mii_regval;
7614
583c28e5 7615 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7b6b8347
MC
7616 return -EOPNOTSUPP;
7617
dad3e452
MC
7618 if (!netif_running(dev))
7619 return -EAGAIN;
7620
c770a65c 7621 spin_lock_bh(&bp->phy_lock);
b6016b76 7622 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
c770a65c 7623 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
7624
7625 data->val_out = mii_regval;
7626
7627 return err;
7628 }
7629
7630 case SIOCSMIIREG:
583c28e5 7631 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7b6b8347
MC
7632 return -EOPNOTSUPP;
7633
dad3e452
MC
7634 if (!netif_running(dev))
7635 return -EAGAIN;
7636
c770a65c 7637 spin_lock_bh(&bp->phy_lock);
b6016b76 7638 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
c770a65c 7639 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
7640
7641 return err;
7642
7643 default:
7644 /* do nothing */
7645 break;
7646 }
7647 return -EOPNOTSUPP;
7648}
7649
7650/* Called with rtnl_lock */
7651static int
7652bnx2_change_mac_addr(struct net_device *dev, void *p)
7653{
7654 struct sockaddr *addr = p;
972ec0d4 7655 struct bnx2 *bp = netdev_priv(dev);
b6016b76 7656
73eef4cd
MC
7657 if (!is_valid_ether_addr(addr->sa_data))
7658 return -EINVAL;
7659
b6016b76
MC
7660 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7661 if (netif_running(dev))
5fcaed01 7662 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
b6016b76
MC
7663
7664 return 0;
7665}
7666
7667/* Called with rtnl_lock */
7668static int
7669bnx2_change_mtu(struct net_device *dev, int new_mtu)
7670{
972ec0d4 7671 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7672
7673 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7674 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7675 return -EINVAL;
7676
7677 dev->mtu = new_mtu;
5d5d0015 7678 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
b6016b76
MC
7679}
7680
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: run every interrupt handler with its IRQ masked so
 * netconsole/kgdb can drain the rings without interrupt delivery.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int vec;

	for (vec = 0; vec < bp->irq_nvecs; vec++) {
		struct bnx2_irq *irq = &bp->irq_tbl[vec];

		disable_irq(irq->vector);
		irq->handler(irq->vector, &bp->bnx2_napi[vec]);
		enable_irq(irq->vector);
	}
}
#endif

253c8b75
MC
7698static void __devinit
7699bnx2_get_5709_media(struct bnx2 *bp)
7700{
7701 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7702 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7703 u32 strap;
7704
7705 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7706 return;
7707 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
583c28e5 7708 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
253c8b75
MC
7709 return;
7710 }
7711
7712 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7713 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7714 else
7715 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7716
7717 if (PCI_FUNC(bp->pdev->devfn) == 0) {
7718 switch (strap) {
7719 case 0x4:
7720 case 0x5:
7721 case 0x6:
583c28e5 7722 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
253c8b75
MC
7723 return;
7724 }
7725 } else {
7726 switch (strap) {
7727 case 0x1:
7728 case 0x2:
7729 case 0x4:
583c28e5 7730 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
253c8b75
MC
7731 return;
7732 }
7733 }
7734}
7735
883e5151
MC
7736static void __devinit
7737bnx2_get_pci_speed(struct bnx2 *bp)
7738{
7739 u32 reg;
7740
7741 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
7742 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7743 u32 clkreg;
7744
f86e82fb 7745 bp->flags |= BNX2_FLAG_PCIX;
883e5151
MC
7746
7747 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7748
7749 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7750 switch (clkreg) {
7751 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7752 bp->bus_speed_mhz = 133;
7753 break;
7754
7755 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7756 bp->bus_speed_mhz = 100;
7757 break;
7758
7759 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7760 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7761 bp->bus_speed_mhz = 66;
7762 break;
7763
7764 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7765 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7766 bp->bus_speed_mhz = 50;
7767 break;
7768
7769 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7770 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7771 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7772 bp->bus_speed_mhz = 33;
7773 break;
7774 }
7775 }
7776 else {
7777 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7778 bp->bus_speed_mhz = 66;
7779 else
7780 bp->bus_speed_mhz = 33;
7781 }
7782
7783 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
f86e82fb 7784 bp->flags |= BNX2_FLAG_PCI_32BIT;
883e5151
MC
7785
7786}
7787
76d99061
MC
7788static void __devinit
7789bnx2_read_vpd_fw_ver(struct bnx2 *bp)
7790{
df25bc38 7791 int rc, i, j;
76d99061 7792 u8 *data;
df25bc38 7793 unsigned int block_end, rosize, len;
76d99061 7794
012093f6
MC
7795#define BNX2_VPD_NVRAM_OFFSET 0x300
7796#define BNX2_VPD_LEN 128
76d99061
MC
7797#define BNX2_MAX_VER_SLEN 30
7798
7799 data = kmalloc(256, GFP_KERNEL);
7800 if (!data)
7801 return;
7802
012093f6
MC
7803 rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
7804 BNX2_VPD_LEN);
76d99061
MC
7805 if (rc)
7806 goto vpd_done;
7807
012093f6
MC
7808 for (i = 0; i < BNX2_VPD_LEN; i += 4) {
7809 data[i] = data[i + BNX2_VPD_LEN + 3];
7810 data[i + 1] = data[i + BNX2_VPD_LEN + 2];
7811 data[i + 2] = data[i + BNX2_VPD_LEN + 1];
7812 data[i + 3] = data[i + BNX2_VPD_LEN];
76d99061
MC
7813 }
7814
df25bc38
MC
7815 i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
7816 if (i < 0)
7817 goto vpd_done;
76d99061 7818
df25bc38
MC
7819 rosize = pci_vpd_lrdt_size(&data[i]);
7820 i += PCI_VPD_LRDT_TAG_SIZE;
7821 block_end = i + rosize;
76d99061 7822
df25bc38
MC
7823 if (block_end > BNX2_VPD_LEN)
7824 goto vpd_done;
76d99061 7825
df25bc38
MC
7826 j = pci_vpd_find_info_keyword(data, i, rosize,
7827 PCI_VPD_RO_KEYWORD_MFR_ID);
7828 if (j < 0)
7829 goto vpd_done;
76d99061 7830
df25bc38 7831 len = pci_vpd_info_field_size(&data[j]);
76d99061 7832
df25bc38
MC
7833 j += PCI_VPD_INFO_FLD_HDR_SIZE;
7834 if (j + len > block_end || len != 4 ||
7835 memcmp(&data[j], "1028", 4))
7836 goto vpd_done;
4067a854 7837
df25bc38
MC
7838 j = pci_vpd_find_info_keyword(data, i, rosize,
7839 PCI_VPD_RO_KEYWORD_VENDOR0);
7840 if (j < 0)
7841 goto vpd_done;
4067a854 7842
df25bc38 7843 len = pci_vpd_info_field_size(&data[j]);
4067a854 7844
df25bc38
MC
7845 j += PCI_VPD_INFO_FLD_HDR_SIZE;
7846 if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
76d99061 7847 goto vpd_done;
df25bc38
MC
7848
7849 memcpy(bp->fw_version, &data[j], len);
7850 bp->fw_version[len] = ' ';
76d99061
MC
7851
7852vpd_done:
7853 kfree(data);
7854}
7855
b6016b76
MC
7856static int __devinit
7857bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7858{
7859 struct bnx2 *bp;
7860 unsigned long mem_len;
58fc2ea4 7861 int rc, i, j;
b6016b76 7862 u32 reg;
40453c83 7863 u64 dma_mask, persist_dma_mask;
b6016b76 7864
b6016b76 7865 SET_NETDEV_DEV(dev, &pdev->dev);
972ec0d4 7866 bp = netdev_priv(dev);
b6016b76
MC
7867
7868 bp->flags = 0;
7869 bp->phy_flags = 0;
7870
354fcd77
MC
7871 bp->temp_stats_blk =
7872 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
7873
7874 if (bp->temp_stats_blk == NULL) {
7875 rc = -ENOMEM;
7876 goto err_out;
7877 }
7878
b6016b76
MC
7879 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7880 rc = pci_enable_device(pdev);
7881 if (rc) {
3a9c6a49 7882 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
b6016b76
MC
7883 goto err_out;
7884 }
7885
7886 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9b91cf9d 7887 dev_err(&pdev->dev,
3a9c6a49 7888 "Cannot find PCI device base address, aborting\n");
b6016b76
MC
7889 rc = -ENODEV;
7890 goto err_out_disable;
7891 }
7892
7893 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7894 if (rc) {
3a9c6a49 7895 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
b6016b76
MC
7896 goto err_out_disable;
7897 }
7898
7899 pci_set_master(pdev);
6ff2da49 7900 pci_save_state(pdev);
b6016b76
MC
7901
7902 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7903 if (bp->pm_cap == 0) {
9b91cf9d 7904 dev_err(&pdev->dev,
3a9c6a49 7905 "Cannot find power management capability, aborting\n");
b6016b76
MC
7906 rc = -EIO;
7907 goto err_out_release;
7908 }
7909
b6016b76
MC
7910 bp->dev = dev;
7911 bp->pdev = pdev;
7912
7913 spin_lock_init(&bp->phy_lock);
1b8227c4 7914 spin_lock_init(&bp->indirect_lock);
c5a88950
MC
7915#ifdef BCM_CNIC
7916 mutex_init(&bp->cnic_lock);
7917#endif
c4028958 7918 INIT_WORK(&bp->reset_task, bnx2_reset_task);
b6016b76
MC
7919
7920 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
4edd473f 7921 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
b6016b76
MC
7922 dev->mem_end = dev->mem_start + mem_len;
7923 dev->irq = pdev->irq;
7924
7925 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7926
7927 if (!bp->regview) {
3a9c6a49 7928 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
b6016b76
MC
7929 rc = -ENOMEM;
7930 goto err_out_release;
7931 }
7932
7933 /* Configure byte swap and enable write to the reg_window registers.
7934 * Rely on CPU to do target byte swapping on big endian systems
7935 * The chip's target access swapping will not swap all accesses
7936 */
7937 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7938 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7939 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7940
829ca9a3 7941 bnx2_set_power_state(bp, PCI_D0);
b6016b76
MC
7942
7943 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7944
883e5151
MC
7945 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7946 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7947 dev_err(&pdev->dev,
3a9c6a49 7948 "Cannot find PCIE capability, aborting\n");
883e5151
MC
7949 rc = -EIO;
7950 goto err_out_unmap;
7951 }
f86e82fb 7952 bp->flags |= BNX2_FLAG_PCIE;
2dd201d7 7953 if (CHIP_REV(bp) == CHIP_REV_Ax)
f86e82fb 7954 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
883e5151 7955 } else {
59b47d8a
MC
7956 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7957 if (bp->pcix_cap == 0) {
7958 dev_err(&pdev->dev,
3a9c6a49 7959 "Cannot find PCIX capability, aborting\n");
59b47d8a
MC
7960 rc = -EIO;
7961 goto err_out_unmap;
7962 }
61d9e3fa 7963 bp->flags |= BNX2_FLAG_BROKEN_STATS;
59b47d8a
MC
7964 }
7965
b4b36042
MC
7966 if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7967 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
f86e82fb 7968 bp->flags |= BNX2_FLAG_MSIX_CAP;
b4b36042
MC
7969 }
7970
8e6a72c4
MC
7971 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7972 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
f86e82fb 7973 bp->flags |= BNX2_FLAG_MSI_CAP;
8e6a72c4
MC
7974 }
7975
40453c83
MC
7976 /* 5708 cannot support DMA addresses > 40-bit. */
7977 if (CHIP_NUM(bp) == CHIP_NUM_5708)
50cf156a 7978 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
40453c83 7979 else
6a35528a 7980 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
40453c83
MC
7981
7982 /* Configure DMA attributes. */
7983 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7984 dev->features |= NETIF_F_HIGHDMA;
7985 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7986 if (rc) {
7987 dev_err(&pdev->dev,
3a9c6a49 7988 "pci_set_consistent_dma_mask failed, aborting\n");
40453c83
MC
7989 goto err_out_unmap;
7990 }
284901a9 7991 } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
3a9c6a49 7992 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
40453c83
MC
7993 goto err_out_unmap;
7994 }
7995
f86e82fb 7996 if (!(bp->flags & BNX2_FLAG_PCIE))
883e5151 7997 bnx2_get_pci_speed(bp);
b6016b76
MC
7998
7999 /* 5706A0 may falsely detect SERR and PERR. */
8000 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8001 reg = REG_RD(bp, PCI_COMMAND);
8002 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8003 REG_WR(bp, PCI_COMMAND, reg);
8004 }
8005 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
f86e82fb 8006 !(bp->flags & BNX2_FLAG_PCIX)) {
b6016b76 8007
9b91cf9d 8008 dev_err(&pdev->dev,
3a9c6a49 8009 "5706 A1 can only be used in a PCIX bus, aborting\n");
b6016b76
MC
8010 goto err_out_unmap;
8011 }
8012
8013 bnx2_init_nvram(bp);
8014
2726d6e1 8015 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
e3648b3d
MC
8016
8017 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
24cb230b
MC
8018 BNX2_SHM_HDR_SIGNATURE_SIG) {
8019 u32 off = PCI_FUNC(pdev->devfn) << 2;
8020
2726d6e1 8021 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
24cb230b 8022 } else
e3648b3d
MC
8023 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8024
b6016b76
MC
8025 /* Get the permanent MAC address. First we need to make sure the
8026 * firmware is actually running.
8027 */
2726d6e1 8028 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
b6016b76
MC
8029
8030 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8031 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
3a9c6a49 8032 dev_err(&pdev->dev, "Firmware not running, aborting\n");
b6016b76
MC
8033 rc = -ENODEV;
8034 goto err_out_unmap;
8035 }
8036
76d99061
MC
8037 bnx2_read_vpd_fw_ver(bp);
8038
8039 j = strlen(bp->fw_version);
2726d6e1 8040 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
76d99061 8041 for (i = 0; i < 3 && j < 24; i++) {
58fc2ea4
MC
8042 u8 num, k, skip0;
8043
76d99061
MC
8044 if (i == 0) {
8045 bp->fw_version[j++] = 'b';
8046 bp->fw_version[j++] = 'c';
8047 bp->fw_version[j++] = ' ';
8048 }
58fc2ea4
MC
8049 num = (u8) (reg >> (24 - (i * 8)));
8050 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8051 if (num >= k || !skip0 || k == 1) {
8052 bp->fw_version[j++] = (num / k) + '0';
8053 skip0 = 0;
8054 }
8055 }
8056 if (i != 2)
8057 bp->fw_version[j++] = '.';
8058 }
2726d6e1 8059 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
846f5c62
MC
8060 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8061 bp->wol = 1;
8062
8063 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
f86e82fb 8064 bp->flags |= BNX2_FLAG_ASF_ENABLE;
c2d3db8c
MC
8065
8066 for (i = 0; i < 30; i++) {
2726d6e1 8067 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
c2d3db8c
MC
8068 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8069 break;
8070 msleep(10);
8071 }
8072 }
2726d6e1 8073 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
58fc2ea4
MC
8074 reg &= BNX2_CONDITION_MFW_RUN_MASK;
8075 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8076 reg != BNX2_CONDITION_MFW_RUN_NONE) {
2726d6e1 8077 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
58fc2ea4 8078
76d99061
MC
8079 if (j < 32)
8080 bp->fw_version[j++] = ' ';
8081 for (i = 0; i < 3 && j < 28; i++) {
2726d6e1 8082 reg = bnx2_reg_rd_ind(bp, addr + i * 4);
58fc2ea4
MC
8083 reg = swab32(reg);
8084 memcpy(&bp->fw_version[j], &reg, 4);
8085 j += 4;
8086 }
8087 }
b6016b76 8088
2726d6e1 8089 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
b6016b76
MC
8090 bp->mac_addr[0] = (u8) (reg >> 8);
8091 bp->mac_addr[1] = (u8) reg;
8092
2726d6e1 8093 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
b6016b76
MC
8094 bp->mac_addr[2] = (u8) (reg >> 24);
8095 bp->mac_addr[3] = (u8) (reg >> 16);
8096 bp->mac_addr[4] = (u8) (reg >> 8);
8097 bp->mac_addr[5] = (u8) reg;
8098
8099 bp->tx_ring_size = MAX_TX_DESC_CNT;
932f3772 8100 bnx2_set_rx_ring_size(bp, 255);
b6016b76
MC
8101
8102 bp->rx_csum = 1;
8103
cf7474a6 8104 bp->tx_quick_cons_trip_int = 2;
b6016b76 8105 bp->tx_quick_cons_trip = 20;
cf7474a6 8106 bp->tx_ticks_int = 18;
b6016b76 8107 bp->tx_ticks = 80;
6aa20a22 8108
cf7474a6
MC
8109 bp->rx_quick_cons_trip_int = 2;
8110 bp->rx_quick_cons_trip = 12;
b6016b76
MC
8111 bp->rx_ticks_int = 18;
8112 bp->rx_ticks = 18;
8113
7ea6920e 8114 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
b6016b76 8115
ac392abc 8116 bp->current_interval = BNX2_TIMER_INTERVAL;
b6016b76 8117
5b0c76ad
MC
8118 bp->phy_addr = 1;
8119
b6016b76 8120 /* Disable WOL support if we are running on a SERDES chip. */
253c8b75
MC
8121 if (CHIP_NUM(bp) == CHIP_NUM_5709)
8122 bnx2_get_5709_media(bp);
8123 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
583c28e5 8124 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
bac0dff6 8125
0d8a6571 8126 bp->phy_port = PORT_TP;
583c28e5 8127 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
0d8a6571 8128 bp->phy_port = PORT_FIBRE;
2726d6e1 8129 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
846f5c62 8130 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
f86e82fb 8131 bp->flags |= BNX2_FLAG_NO_WOL;
846f5c62
MC
8132 bp->wol = 0;
8133 }
38ea3686
MC
8134 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
8135 /* Don't do parallel detect on this board because of
8136 * some board problems. The link will not go down
8137 * if we do parallel detect.
8138 */
8139 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8140 pdev->subsystem_device == 0x310c)
8141 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8142 } else {
5b0c76ad 8143 bp->phy_addr = 2;
5b0c76ad 8144 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
583c28e5 8145 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
5b0c76ad 8146 }
261dd5ca
MC
8147 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
8148 CHIP_NUM(bp) == CHIP_NUM_5708)
583c28e5 8149 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
fb0c18bd
MC
8150 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
8151 (CHIP_REV(bp) == CHIP_REV_Ax ||
8152 CHIP_REV(bp) == CHIP_REV_Bx))
583c28e5 8153 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
b6016b76 8154
7c62e83b
MC
8155 bnx2_init_fw_cap(bp);
8156
16088272
MC
8157 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
8158 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
5ec6d7bf
MC
8159 (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
8160 !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
f86e82fb 8161 bp->flags |= BNX2_FLAG_NO_WOL;
846f5c62
MC
8162 bp->wol = 0;
8163 }
dda1e390 8164
b6016b76
MC
8165 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8166 bp->tx_quick_cons_trip_int =
8167 bp->tx_quick_cons_trip;
8168 bp->tx_ticks_int = bp->tx_ticks;
8169 bp->rx_quick_cons_trip_int =
8170 bp->rx_quick_cons_trip;
8171 bp->rx_ticks_int = bp->rx_ticks;
8172 bp->comp_prod_trip_int = bp->comp_prod_trip;
8173 bp->com_ticks_int = bp->com_ticks;
8174 bp->cmd_ticks_int = bp->cmd_ticks;
8175 }
8176
f9317a40
MC
8177 /* Disable MSI on 5706 if AMD 8132 bridge is found.
8178 *
8179 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
8180 * with byte enables disabled on the unused 32-bit word. This is legal
8181 * but causes problems on the AMD 8132 which will eventually stop
8182 * responding after a while.
8183 *
8184 * AMD believes this incompatibility is unique to the 5706, and
88187dfa 8185 * prefers to locally disable MSI rather than globally disabling it.
f9317a40
MC
8186 */
8187 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
8188 struct pci_dev *amd_8132 = NULL;
8189
8190 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8191 PCI_DEVICE_ID_AMD_8132_BRIDGE,
8192 amd_8132))) {
f9317a40 8193
44c10138
AK
8194 if (amd_8132->revision >= 0x10 &&
8195 amd_8132->revision <= 0x13) {
f9317a40
MC
8196 disable_msi = 1;
8197 pci_dev_put(amd_8132);
8198 break;
8199 }
8200 }
8201 }
8202
deaf391b 8203 bnx2_set_default_link(bp);
b6016b76
MC
8204 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8205
cd339a0e 8206 init_timer(&bp->timer);
ac392abc 8207 bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
cd339a0e
MC
8208 bp->timer.data = (unsigned long) bp;
8209 bp->timer.function = bnx2_timer;
8210
b6016b76
MC
8211 return 0;
8212
8213err_out_unmap:
8214 if (bp->regview) {
8215 iounmap(bp->regview);
73eef4cd 8216 bp->regview = NULL;
b6016b76
MC
8217 }
8218
8219err_out_release:
8220 pci_release_regions(pdev);
8221
8222err_out_disable:
8223 pci_disable_device(pdev);
8224 pci_set_drvdata(pdev, NULL);
8225
8226err_out:
8227 return rc;
8228}
8229
883e5151
MC
8230static char * __devinit
8231bnx2_bus_string(struct bnx2 *bp, char *str)
8232{
8233 char *s = str;
8234
f86e82fb 8235 if (bp->flags & BNX2_FLAG_PCIE) {
883e5151
MC
8236 s += sprintf(s, "PCI Express");
8237 } else {
8238 s += sprintf(s, "PCI");
f86e82fb 8239 if (bp->flags & BNX2_FLAG_PCIX)
883e5151 8240 s += sprintf(s, "-X");
f86e82fb 8241 if (bp->flags & BNX2_FLAG_PCI_32BIT)
883e5151
MC
8242 s += sprintf(s, " 32-bit");
8243 else
8244 s += sprintf(s, " 64-bit");
8245 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8246 }
8247 return str;
8248}
8249
f048fa9c
MC
8250static void
8251bnx2_del_napi(struct bnx2 *bp)
8252{
8253 int i;
8254
8255 for (i = 0; i < bp->irq_nvecs; i++)
8256 netif_napi_del(&bp->bnx2_napi[i].napi);
8257}
8258
8259static void
35efa7c1
MC
8260bnx2_init_napi(struct bnx2 *bp)
8261{
b4b36042 8262 int i;
35efa7c1 8263
4327ba43 8264 for (i = 0; i < bp->irq_nvecs; i++) {
35e9010b
MC
8265 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8266 int (*poll)(struct napi_struct *, int);
8267
8268 if (i == 0)
8269 poll = bnx2_poll;
8270 else
f0ea2e63 8271 poll = bnx2_poll_msix;
35e9010b
MC
8272
8273 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
b4b36042
MC
8274 bnapi->bp = bp;
8275 }
35efa7c1
MC
8276}
8277
0421eae6
SH
/* net_device callback table shared by every bnx2 interface. */
static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats64	= bnx2_get_stats64,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2,
#endif
};
8296
72dccb01
ED
/* OR @flags into the features advertised for VLAN devices stacked on
 * @dev; a no-op when VLAN support is compiled out.
 *
 * Fix: "static void inline" -> "static inline void"; kernel style
 * requires the inline keyword between the storage class and the type.
 */
static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
{
#ifdef BCM_VLAN
	dev->vlan_features |= flags;
#endif
}
8303
b6016b76
MC
/* PCI probe entry point: allocate the net_device, initialize the board,
 * load firmware, advertise offload features and register with the
 * networking core.  Returns 0 on success or a negative errno, in which
 * case everything acquired here is released again.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];

	/* Print the driver banner only for the first probed device. */
	if (version_printed++ == 0)
		pr_info("%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		/* bnx2_init_board() cleans up after itself on failure;
		 * only the netdev allocation needs undoing here.
		 */
		free_netdev(dev);
		return rc;
	}

	dev->netdev_ops = &bnx2_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);

	pci_set_drvdata(pdev, dev);

	rc = bnx2_request_firmware(bp);
	if (rc)
		goto error;

	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);

	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO |
		NETIF_F_RXHASH;
	vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
	/* IPv6 checksum offload and TSO6 are 5709-only capabilities. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		dev->features |= NETIF_F_IPV6_CSUM;
		vlan_features_add(dev, NETIF_F_IPV6_CSUM);
	}
#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	vlan_features_add(dev, NETIF_F_TSO | NETIF_F_TSO_ECN);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		dev->features |= NETIF_F_TSO6;
		vlan_features_add(dev, NETIF_F_TSO6);
	}
	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto error;
	}

	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n",
		    board_info[ent->driver_data].name,
		    ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		    ((CHIP_ID(bp) & 0x0ff0) >> 4),
		    bnx2_bus_string(bp, str),
		    dev->base_addr,
		    bp->pdev->irq, dev->dev_addr);

	return 0;

error:
	/* Unwind bnx2_request_firmware() and bnx2_init_board() state in
	 * reverse acquisition order.
	 */
	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
	return rc;
}
8388
/* PCI remove entry point: tear down everything bnx2_init_one() set up,
 * in reverse order.  The netdev is unregistered first so no new I/O can
 * arrive while resources are being released.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Make sure no deferred reset/link work is still pending. */
	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);

	kfree(bp->temp_stats_blk);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
8414
/* Legacy PM suspend hook: quiesce the interface, reset the chip and
 * drop to the requested low-power state.  Returns 0.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp, true);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
8438
/* Legacy PM resume hook: restore PCI config space and, if the interface
 * was up at suspend time, bring the chip and NAPI/queues back.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp, true);
	return 0;
}
8455
6ff2da49
WX
8456/**
8457 * bnx2_io_error_detected - called when PCI error is detected
8458 * @pdev: Pointer to PCI device
8459 * @state: The current pci connection state
8460 *
8461 * This function is called after a PCI bus error affecting
8462 * this device has been detected.
8463 */
8464static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8465 pci_channel_state_t state)
8466{
8467 struct net_device *dev = pci_get_drvdata(pdev);
8468 struct bnx2 *bp = netdev_priv(dev);
8469
8470 rtnl_lock();
8471 netif_device_detach(dev);
8472
2ec3de26
DN
8473 if (state == pci_channel_io_perm_failure) {
8474 rtnl_unlock();
8475 return PCI_ERS_RESULT_DISCONNECT;
8476 }
8477
6ff2da49 8478 if (netif_running(dev)) {
212f9934 8479 bnx2_netif_stop(bp, true);
6ff2da49
WX
8480 del_timer_sync(&bp->timer);
8481 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8482 }
8483
8484 pci_disable_device(pdev);
8485 rtnl_unlock();
8486
8487 /* Request a slot slot reset. */
8488 return PCI_ERS_RESULT_NEED_RESET;
8489}
8490
8491/**
8492 * bnx2_io_slot_reset - called after the pci bus has been reset.
8493 * @pdev: Pointer to PCI device
8494 *
8495 * Restart the card from scratch, as if from a cold-boot.
8496 */
8497static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8498{
8499 struct net_device *dev = pci_get_drvdata(pdev);
8500 struct bnx2 *bp = netdev_priv(dev);
8501
8502 rtnl_lock();
8503 if (pci_enable_device(pdev)) {
8504 dev_err(&pdev->dev,
3a9c6a49 8505 "Cannot re-enable PCI device after reset\n");
6ff2da49
WX
8506 rtnl_unlock();
8507 return PCI_ERS_RESULT_DISCONNECT;
8508 }
8509 pci_set_master(pdev);
8510 pci_restore_state(pdev);
529fab67 8511 pci_save_state(pdev);
6ff2da49
WX
8512
8513 if (netif_running(dev)) {
8514 bnx2_set_power_state(bp, PCI_D0);
8515 bnx2_init_nic(bp, 1);
8516 }
8517
8518 rtnl_unlock();
8519 return PCI_ERS_RESULT_RECOVERED;
8520}
8521
8522/**
8523 * bnx2_io_resume - called when traffic can start flowing again.
8524 * @pdev: Pointer to PCI device
8525 *
8526 * This callback is called when the error recovery driver tells us that
8527 * its OK to resume normal operation.
8528 */
8529static void bnx2_io_resume(struct pci_dev *pdev)
8530{
8531 struct net_device *dev = pci_get_drvdata(pdev);
8532 struct bnx2 *bp = netdev_priv(dev);
8533
8534 rtnl_lock();
8535 if (netif_running(dev))
212f9934 8536 bnx2_netif_start(bp, true);
6ff2da49
WX
8537
8538 netif_device_attach(dev);
8539 rtnl_unlock();
8540}
8541
/* PCI AER (advanced error recovery) callbacks. */
static struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};
8547
b6016b76 8548static struct pci_driver bnx2_pci_driver = {
14ab9b86
PH
8549 .name = DRV_MODULE_NAME,
8550 .id_table = bnx2_pci_tbl,
8551 .probe = bnx2_init_one,
8552 .remove = __devexit_p(bnx2_remove_one),
8553 .suspend = bnx2_suspend,
8554 .resume = bnx2_resume,
6ff2da49 8555 .err_handler = &bnx2_err_handler,
b6016b76
MC
8556};
8557
/* Module entry point: register with the PCI core. */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}
8562
/* Module exit point: unregister from the PCI core. */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
8567
8568module_init(bnx2_init);
8569module_exit(bnx2_cleanup);
8570
8571
8572