]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - drivers/net/ethernet/broadcom/bnx2.c
net: use jump_label to shortcut RPS if not setup
[mirror_ubuntu-jammy-kernel.git] / drivers / net / ethernet / broadcom / bnx2.c
CommitLineData
b6016b76
MC
1/* bnx2.c: Broadcom NX2 network driver.
2 *
dc187cb3 3 * Copyright (c) 2004-2011 Broadcom Corporation
b6016b76
MC
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
3a9c6a49 12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
f2a4f052
MC
13
14#include <linux/module.h>
15#include <linux/moduleparam.h>
16
17#include <linux/kernel.h>
18#include <linux/timer.h>
19#include <linux/errno.h>
20#include <linux/ioport.h>
21#include <linux/slab.h>
22#include <linux/vmalloc.h>
23#include <linux/interrupt.h>
24#include <linux/pci.h>
25#include <linux/init.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/skbuff.h>
29#include <linux/dma-mapping.h>
1977f032 30#include <linux/bitops.h>
f2a4f052
MC
31#include <asm/io.h>
32#include <asm/irq.h>
33#include <linux/delay.h>
34#include <asm/byteorder.h>
c86a31f4 35#include <asm/page.h>
f2a4f052
MC
36#include <linux/time.h>
37#include <linux/ethtool.h>
38#include <linux/mii.h>
01789349 39#include <linux/if.h>
f2a4f052 40#include <linux/if_vlan.h>
f2a4f052 41#include <net/ip.h>
de081fa5 42#include <net/tcp.h>
f2a4f052 43#include <net/checksum.h>
f2a4f052
MC
44#include <linux/workqueue.h>
45#include <linux/crc32.h>
46#include <linux/prefetch.h>
29b12174 47#include <linux/cache.h>
57579f76 48#include <linux/firmware.h>
706bf240 49#include <linux/log2.h>
cd709aa9 50#include <linux/aer.h>
f2a4f052 51
4edd473f
MC
52#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
53#define BCM_CNIC 1
54#include "cnic_if.h"
55#endif
b6016b76
MC
56#include "bnx2.h"
57#include "bnx2_fw.h"
b3448b0b 58
b6016b76 59#define DRV_MODULE_NAME "bnx2"
3aeb7d22
MC
60#define DRV_MODULE_VERSION "2.1.11"
61#define DRV_MODULE_RELDATE "July 20, 2011"
0268102d 62#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.1.fw"
22fa159d 63#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
dc187cb3 64#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1a.fw"
22fa159d
MC
65#define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-6.0.17.fw"
66#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-6.0.17.fw"
b6016b76
MC
67
68#define RUN_AT(x) (jiffies + (x))
69
70/* Time in jiffies before concluding the transmitter is hung. */
71#define TX_TIMEOUT (5*HZ)
72
fefa8645 73static char version[] __devinitdata =
b6016b76
MC
74 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
75
76MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
453a9c6e 77MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
b6016b76
MC
78MODULE_LICENSE("GPL");
79MODULE_VERSION(DRV_MODULE_VERSION);
57579f76
MC
80MODULE_FIRMWARE(FW_MIPS_FILE_06);
81MODULE_FIRMWARE(FW_RV2P_FILE_06);
82MODULE_FIRMWARE(FW_MIPS_FILE_09);
83MODULE_FIRMWARE(FW_RV2P_FILE_09);
078b0735 84MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
b6016b76
MC
85
86static int disable_msi = 0;
87
88module_param(disable_msi, int, 0);
89MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
90
91typedef enum {
92 BCM5706 = 0,
93 NC370T,
94 NC370I,
95 BCM5706S,
96 NC370F,
5b0c76ad
MC
97 BCM5708,
98 BCM5708S,
bac0dff6 99 BCM5709,
27a005b8 100 BCM5709S,
7bb0a04f 101 BCM5716,
1caacecb 102 BCM5716S,
b6016b76
MC
103} board_t;
104
105/* indexed by board_t, above */
fefa8645 106static struct {
b6016b76
MC
107 char *name;
108} board_info[] __devinitdata = {
109 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
110 { "HP NC370T Multifunction Gigabit Server Adapter" },
111 { "HP NC370i Multifunction Gigabit Server Adapter" },
112 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
113 { "HP NC370F Multifunction Gigabit Server Adapter" },
5b0c76ad
MC
114 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
115 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
bac0dff6 116 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
27a005b8 117 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
7bb0a04f 118 { "Broadcom NetXtreme II BCM5716 1000Base-T" },
1caacecb 119 { "Broadcom NetXtreme II BCM5716 1000Base-SX" },
b6016b76
MC
120 };
121
7bb0a04f 122static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
b6016b76
MC
123 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
124 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
125 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
126 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
127 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
128 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
5b0c76ad
MC
129 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
130 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
b6016b76
MC
131 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
132 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
133 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
134 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
5b0c76ad
MC
135 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
136 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
bac0dff6
MC
137 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
138 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
27a005b8
MC
139 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
140 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
7bb0a04f
MC
141 { PCI_VENDOR_ID_BROADCOM, 0x163b,
142 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
1caacecb 143 { PCI_VENDOR_ID_BROADCOM, 0x163c,
1f2435e5 144 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
b6016b76
MC
145 { 0, }
146};
147
0ced9d01 148static const struct flash_spec flash_table[] =
b6016b76 149{
e30372c9
MC
150#define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
151#define NONBUFFERED_FLAGS (BNX2_NV_WREN)
b6016b76 152 /* Slow EEPROM */
37137709 153 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
e30372c9 154 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
b6016b76
MC
155 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
156 "EEPROM - slow"},
37137709
MC
157 /* Expansion entry 0001 */
158 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 159 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
160 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
161 "Entry 0001"},
b6016b76
MC
162 /* Saifun SA25F010 (non-buffered flash) */
163 /* strap, cfg1, & write1 need updates */
37137709 164 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 165 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
b6016b76
MC
166 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
167 "Non-buffered flash (128kB)"},
168 /* Saifun SA25F020 (non-buffered flash) */
169 /* strap, cfg1, & write1 need updates */
37137709 170 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 171 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
b6016b76
MC
172 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
173 "Non-buffered flash (256kB)"},
37137709
MC
174 /* Expansion entry 0100 */
175 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 176 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
177 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
178 "Entry 0100"},
179 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
6aa20a22 180 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
e30372c9 181 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
37137709
MC
182 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
183 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
184 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
185 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
e30372c9 186 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
37137709
MC
187 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
188 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
189 /* Saifun SA25F005 (non-buffered flash) */
190 /* strap, cfg1, & write1 need updates */
191 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 192 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
193 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
194 "Non-buffered flash (64kB)"},
195 /* Fast EEPROM */
196 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
e30372c9 197 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
37137709
MC
198 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
199 "EEPROM - fast"},
200 /* Expansion entry 1001 */
201 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 202 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
203 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
204 "Entry 1001"},
205 /* Expansion entry 1010 */
206 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 207 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
208 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
209 "Entry 1010"},
210 /* ATMEL AT45DB011B (buffered flash) */
211 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
e30372c9 212 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
37137709
MC
213 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
214 "Buffered flash (128kB)"},
215 /* Expansion entry 1100 */
216 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 217 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
218 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
219 "Entry 1100"},
220 /* Expansion entry 1101 */
221 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 222 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
223 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
224 "Entry 1101"},
225 /* Ateml Expansion entry 1110 */
226 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
e30372c9 227 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
37137709
MC
228 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
229 "Entry 1110 (Atmel)"},
230 /* ATMEL AT45DB021B (buffered flash) */
231 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
e30372c9 232 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
37137709
MC
233 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
234 "Buffered flash (256kB)"},
b6016b76
MC
235};
236
0ced9d01 237static const struct flash_spec flash_5709 = {
e30372c9
MC
238 .flags = BNX2_NV_BUFFERED,
239 .page_bits = BCM5709_FLASH_PAGE_BITS,
240 .page_size = BCM5709_FLASH_PAGE_SIZE,
241 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
242 .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
243 .name = "5709 Buffered flash (256kB)",
244};
245
b6016b76
MC
246MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
247
4327ba43 248static void bnx2_init_napi(struct bnx2 *bp);
f048fa9c 249static void bnx2_del_napi(struct bnx2 *bp);
4327ba43 250
35e9010b 251static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
e89bbf10 252{
2f8af120 253 u32 diff;
e89bbf10 254
11848b96
MC
255 /* Tell compiler to fetch tx_prod and tx_cons from memory. */
256 barrier();
faac9c4b
MC
257
258 /* The ring uses 256 indices for 255 entries, one of them
259 * needs to be skipped.
260 */
35e9010b 261 diff = txr->tx_prod - txr->tx_cons;
faac9c4b
MC
262 if (unlikely(diff >= TX_DESC_CNT)) {
263 diff &= 0xffff;
264 if (diff == TX_DESC_CNT)
265 diff = MAX_TX_DESC_CNT;
266 }
807540ba 267 return bp->tx_ring_size - diff;
e89bbf10
MC
268}
269
b6016b76
MC
270static u32
271bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
272{
1b8227c4
MC
273 u32 val;
274
275 spin_lock_bh(&bp->indirect_lock);
b6016b76 276 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
1b8227c4
MC
277 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
278 spin_unlock_bh(&bp->indirect_lock);
279 return val;
b6016b76
MC
280}
281
282static void
283bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
284{
1b8227c4 285 spin_lock_bh(&bp->indirect_lock);
b6016b76
MC
286 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
287 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
1b8227c4 288 spin_unlock_bh(&bp->indirect_lock);
b6016b76
MC
289}
290
2726d6e1
MC
291static void
292bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
293{
294 bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
295}
296
297static u32
298bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
299{
807540ba 300 return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
2726d6e1
MC
301}
302
b6016b76
MC
303static void
304bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
305{
306 offset += cid_addr;
1b8227c4 307 spin_lock_bh(&bp->indirect_lock);
59b47d8a
MC
308 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
309 int i;
310
311 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
312 REG_WR(bp, BNX2_CTX_CTX_CTRL,
313 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
314 for (i = 0; i < 5; i++) {
59b47d8a
MC
315 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
316 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
317 break;
318 udelay(5);
319 }
320 } else {
321 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
322 REG_WR(bp, BNX2_CTX_DATA, val);
323 }
1b8227c4 324 spin_unlock_bh(&bp->indirect_lock);
b6016b76
MC
325}
326
4edd473f
MC
327#ifdef BCM_CNIC
328static int
329bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
330{
331 struct bnx2 *bp = netdev_priv(dev);
332 struct drv_ctl_io *io = &info->data.io;
333
334 switch (info->cmd) {
335 case DRV_CTL_IO_WR_CMD:
336 bnx2_reg_wr_ind(bp, io->offset, io->data);
337 break;
338 case DRV_CTL_IO_RD_CMD:
339 io->data = bnx2_reg_rd_ind(bp, io->offset);
340 break;
341 case DRV_CTL_CTX_WR_CMD:
342 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
343 break;
344 default:
345 return -EINVAL;
346 }
347 return 0;
348}
349
350static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
351{
352 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
353 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
354 int sb_id;
355
356 if (bp->flags & BNX2_FLAG_USING_MSIX) {
357 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
358 bnapi->cnic_present = 0;
359 sb_id = bp->irq_nvecs;
360 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
361 } else {
362 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
363 bnapi->cnic_tag = bnapi->last_status_idx;
364 bnapi->cnic_present = 1;
365 sb_id = 0;
366 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
367 }
368
369 cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
370 cp->irq_arr[0].status_blk = (void *)
371 ((unsigned long) bnapi->status_blk.msi +
372 (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
373 cp->irq_arr[0].status_blk_num = sb_id;
374 cp->num_irq = 1;
375}
376
377static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
378 void *data)
379{
380 struct bnx2 *bp = netdev_priv(dev);
381 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
382
383 if (ops == NULL)
384 return -EINVAL;
385
386 if (cp->drv_state & CNIC_DRV_STATE_REGD)
387 return -EBUSY;
388
41c2178a
MC
389 if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
390 return -ENODEV;
391
4edd473f
MC
392 bp->cnic_data = data;
393 rcu_assign_pointer(bp->cnic_ops, ops);
394
395 cp->num_irq = 0;
396 cp->drv_state = CNIC_DRV_STATE_REGD;
397
398 bnx2_setup_cnic_irq_info(bp);
399
400 return 0;
401}
402
403static int bnx2_unregister_cnic(struct net_device *dev)
404{
405 struct bnx2 *bp = netdev_priv(dev);
406 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
407 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
408
c5a88950 409 mutex_lock(&bp->cnic_lock);
4edd473f
MC
410 cp->drv_state = 0;
411 bnapi->cnic_present = 0;
412 rcu_assign_pointer(bp->cnic_ops, NULL);
c5a88950 413 mutex_unlock(&bp->cnic_lock);
4edd473f
MC
414 synchronize_rcu();
415 return 0;
416}
417
418struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
419{
420 struct bnx2 *bp = netdev_priv(dev);
421 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
422
7625eb2f
MC
423 if (!cp->max_iscsi_conn)
424 return NULL;
425
4edd473f
MC
426 cp->drv_owner = THIS_MODULE;
427 cp->chip_id = bp->chip_id;
428 cp->pdev = bp->pdev;
429 cp->io_base = bp->regview;
430 cp->drv_ctl = bnx2_drv_ctl;
431 cp->drv_register_cnic = bnx2_register_cnic;
432 cp->drv_unregister_cnic = bnx2_unregister_cnic;
433
434 return cp;
435}
436EXPORT_SYMBOL(bnx2_cnic_probe);
437
438static void
439bnx2_cnic_stop(struct bnx2 *bp)
440{
441 struct cnic_ops *c_ops;
442 struct cnic_ctl_info info;
443
c5a88950 444 mutex_lock(&bp->cnic_lock);
13707f9e
ED
445 c_ops = rcu_dereference_protected(bp->cnic_ops,
446 lockdep_is_held(&bp->cnic_lock));
4edd473f
MC
447 if (c_ops) {
448 info.cmd = CNIC_CTL_STOP_CMD;
449 c_ops->cnic_ctl(bp->cnic_data, &info);
450 }
c5a88950 451 mutex_unlock(&bp->cnic_lock);
4edd473f
MC
452}
453
454static void
455bnx2_cnic_start(struct bnx2 *bp)
456{
457 struct cnic_ops *c_ops;
458 struct cnic_ctl_info info;
459
c5a88950 460 mutex_lock(&bp->cnic_lock);
13707f9e
ED
461 c_ops = rcu_dereference_protected(bp->cnic_ops,
462 lockdep_is_held(&bp->cnic_lock));
4edd473f
MC
463 if (c_ops) {
464 if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
465 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
466
467 bnapi->cnic_tag = bnapi->last_status_idx;
468 }
469 info.cmd = CNIC_CTL_START_CMD;
470 c_ops->cnic_ctl(bp->cnic_data, &info);
471 }
c5a88950 472 mutex_unlock(&bp->cnic_lock);
4edd473f
MC
473}
474
475#else
476
/* No-op stubs used when the CNIC driver is not configured. */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}
486
487#endif
488
b6016b76
MC
489static int
490bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
491{
492 u32 val1;
493 int i, ret;
494
583c28e5 495 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
b6016b76
MC
496 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
497 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
498
499 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
500 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
501
502 udelay(40);
503 }
504
505 val1 = (bp->phy_addr << 21) | (reg << 16) |
506 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
507 BNX2_EMAC_MDIO_COMM_START_BUSY;
508 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
509
510 for (i = 0; i < 50; i++) {
511 udelay(10);
512
513 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
514 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
515 udelay(5);
516
517 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
518 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
519
520 break;
521 }
522 }
523
524 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
525 *val = 0x0;
526 ret = -EBUSY;
527 }
528 else {
529 *val = val1;
530 ret = 0;
531 }
532
583c28e5 533 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
b6016b76
MC
534 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
535 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
536
537 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
538 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
539
540 udelay(40);
541 }
542
543 return ret;
544}
545
546static int
547bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
548{
549 u32 val1;
550 int i, ret;
551
583c28e5 552 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
b6016b76
MC
553 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
554 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
555
556 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
557 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
558
559 udelay(40);
560 }
561
562 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
563 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
564 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
565 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
6aa20a22 566
b6016b76
MC
567 for (i = 0; i < 50; i++) {
568 udelay(10);
569
570 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
571 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
572 udelay(5);
573 break;
574 }
575 }
576
577 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
578 ret = -EBUSY;
579 else
580 ret = 0;
581
583c28e5 582 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
b6016b76
MC
583 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
584 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
585
586 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
587 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
588
589 udelay(40);
590 }
591
592 return ret;
593}
594
595static void
596bnx2_disable_int(struct bnx2 *bp)
597{
b4b36042
MC
598 int i;
599 struct bnx2_napi *bnapi;
600
601 for (i = 0; i < bp->irq_nvecs; i++) {
602 bnapi = &bp->bnx2_napi[i];
603 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
604 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
605 }
b6016b76
MC
606 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
607}
608
609static void
610bnx2_enable_int(struct bnx2 *bp)
611{
b4b36042
MC
612 int i;
613 struct bnx2_napi *bnapi;
35efa7c1 614
b4b36042
MC
615 for (i = 0; i < bp->irq_nvecs; i++) {
616 bnapi = &bp->bnx2_napi[i];
1269a8a6 617
b4b36042
MC
618 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
619 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
620 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
621 bnapi->last_status_idx);
b6016b76 622
b4b36042
MC
623 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
624 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
625 bnapi->last_status_idx);
626 }
bf5295bb 627 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
b6016b76
MC
628}
629
630static void
631bnx2_disable_int_sync(struct bnx2 *bp)
632{
b4b36042
MC
633 int i;
634
b6016b76 635 atomic_inc(&bp->intr_sem);
3767546c
MC
636 if (!netif_running(bp->dev))
637 return;
638
b6016b76 639 bnx2_disable_int(bp);
b4b36042
MC
640 for (i = 0; i < bp->irq_nvecs; i++)
641 synchronize_irq(bp->irq_tbl[i].vector);
b6016b76
MC
642}
643
35efa7c1
MC
644static void
645bnx2_napi_disable(struct bnx2 *bp)
646{
b4b36042
MC
647 int i;
648
649 for (i = 0; i < bp->irq_nvecs; i++)
650 napi_disable(&bp->bnx2_napi[i].napi);
35efa7c1
MC
651}
652
653static void
654bnx2_napi_enable(struct bnx2 *bp)
655{
b4b36042
MC
656 int i;
657
658 for (i = 0; i < bp->irq_nvecs; i++)
659 napi_enable(&bp->bnx2_napi[i].napi);
35efa7c1
MC
660}
661
b6016b76 662static void
212f9934 663bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
b6016b76 664{
212f9934
MC
665 if (stop_cnic)
666 bnx2_cnic_stop(bp);
b6016b76 667 if (netif_running(bp->dev)) {
35efa7c1 668 bnx2_napi_disable(bp);
b6016b76 669 netif_tx_disable(bp->dev);
b6016b76 670 }
b7466560 671 bnx2_disable_int_sync(bp);
a0ba6760 672 netif_carrier_off(bp->dev); /* prevent tx timeout */
b6016b76
MC
673}
674
675static void
212f9934 676bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
b6016b76
MC
677{
678 if (atomic_dec_and_test(&bp->intr_sem)) {
679 if (netif_running(bp->dev)) {
706bf240 680 netif_tx_wake_all_queues(bp->dev);
a0ba6760
MC
681 spin_lock_bh(&bp->phy_lock);
682 if (bp->link_up)
683 netif_carrier_on(bp->dev);
684 spin_unlock_bh(&bp->phy_lock);
35efa7c1 685 bnx2_napi_enable(bp);
b6016b76 686 bnx2_enable_int(bp);
212f9934
MC
687 if (start_cnic)
688 bnx2_cnic_start(bp);
b6016b76
MC
689 }
690 }
691}
692
35e9010b
MC
693static void
694bnx2_free_tx_mem(struct bnx2 *bp)
695{
696 int i;
697
698 for (i = 0; i < bp->num_tx_rings; i++) {
699 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
700 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
701
702 if (txr->tx_desc_ring) {
36227e88
SG
703 dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
704 txr->tx_desc_ring,
705 txr->tx_desc_mapping);
35e9010b
MC
706 txr->tx_desc_ring = NULL;
707 }
708 kfree(txr->tx_buf_ring);
709 txr->tx_buf_ring = NULL;
710 }
711}
712
bb4f98ab
MC
713static void
714bnx2_free_rx_mem(struct bnx2 *bp)
715{
716 int i;
717
718 for (i = 0; i < bp->num_rx_rings; i++) {
719 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
720 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
721 int j;
722
723 for (j = 0; j < bp->rx_max_ring; j++) {
724 if (rxr->rx_desc_ring[j])
36227e88
SG
725 dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
726 rxr->rx_desc_ring[j],
727 rxr->rx_desc_mapping[j]);
bb4f98ab
MC
728 rxr->rx_desc_ring[j] = NULL;
729 }
25b0b999 730 vfree(rxr->rx_buf_ring);
bb4f98ab
MC
731 rxr->rx_buf_ring = NULL;
732
733 for (j = 0; j < bp->rx_max_pg_ring; j++) {
734 if (rxr->rx_pg_desc_ring[j])
36227e88
SG
735 dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
736 rxr->rx_pg_desc_ring[j],
737 rxr->rx_pg_desc_mapping[j]);
3298a738 738 rxr->rx_pg_desc_ring[j] = NULL;
bb4f98ab 739 }
25b0b999 740 vfree(rxr->rx_pg_ring);
bb4f98ab
MC
741 rxr->rx_pg_ring = NULL;
742 }
743}
744
35e9010b
MC
745static int
746bnx2_alloc_tx_mem(struct bnx2 *bp)
747{
748 int i;
749
750 for (i = 0; i < bp->num_tx_rings; i++) {
751 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
752 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
753
754 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
755 if (txr->tx_buf_ring == NULL)
756 return -ENOMEM;
757
758 txr->tx_desc_ring =
36227e88
SG
759 dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
760 &txr->tx_desc_mapping, GFP_KERNEL);
35e9010b
MC
761 if (txr->tx_desc_ring == NULL)
762 return -ENOMEM;
763 }
764 return 0;
765}
766
bb4f98ab
MC
767static int
768bnx2_alloc_rx_mem(struct bnx2 *bp)
769{
770 int i;
771
772 for (i = 0; i < bp->num_rx_rings; i++) {
773 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
774 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
775 int j;
776
777 rxr->rx_buf_ring =
89bf67f1 778 vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
bb4f98ab
MC
779 if (rxr->rx_buf_ring == NULL)
780 return -ENOMEM;
781
bb4f98ab
MC
782 for (j = 0; j < bp->rx_max_ring; j++) {
783 rxr->rx_desc_ring[j] =
36227e88
SG
784 dma_alloc_coherent(&bp->pdev->dev,
785 RXBD_RING_SIZE,
786 &rxr->rx_desc_mapping[j],
787 GFP_KERNEL);
bb4f98ab
MC
788 if (rxr->rx_desc_ring[j] == NULL)
789 return -ENOMEM;
790
791 }
792
793 if (bp->rx_pg_ring_size) {
89bf67f1 794 rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
bb4f98ab
MC
795 bp->rx_max_pg_ring);
796 if (rxr->rx_pg_ring == NULL)
797 return -ENOMEM;
798
bb4f98ab
MC
799 }
800
801 for (j = 0; j < bp->rx_max_pg_ring; j++) {
802 rxr->rx_pg_desc_ring[j] =
36227e88
SG
803 dma_alloc_coherent(&bp->pdev->dev,
804 RXBD_RING_SIZE,
805 &rxr->rx_pg_desc_mapping[j],
806 GFP_KERNEL);
bb4f98ab
MC
807 if (rxr->rx_pg_desc_ring[j] == NULL)
808 return -ENOMEM;
809
810 }
811 }
812 return 0;
813}
814
b6016b76
MC
815static void
816bnx2_free_mem(struct bnx2 *bp)
817{
13daffa2 818 int i;
43e80b89 819 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
13daffa2 820
35e9010b 821 bnx2_free_tx_mem(bp);
bb4f98ab 822 bnx2_free_rx_mem(bp);
35e9010b 823
59b47d8a
MC
824 for (i = 0; i < bp->ctx_pages; i++) {
825 if (bp->ctx_blk[i]) {
36227e88
SG
826 dma_free_coherent(&bp->pdev->dev, BCM_PAGE_SIZE,
827 bp->ctx_blk[i],
828 bp->ctx_blk_mapping[i]);
59b47d8a
MC
829 bp->ctx_blk[i] = NULL;
830 }
831 }
43e80b89 832 if (bnapi->status_blk.msi) {
36227e88
SG
833 dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
834 bnapi->status_blk.msi,
835 bp->status_blk_mapping);
43e80b89 836 bnapi->status_blk.msi = NULL;
0f31f994 837 bp->stats_blk = NULL;
b6016b76 838 }
b6016b76
MC
839}
840
841static int
842bnx2_alloc_mem(struct bnx2 *bp)
843{
35e9010b 844 int i, status_blk_size, err;
43e80b89
MC
845 struct bnx2_napi *bnapi;
846 void *status_blk;
b6016b76 847
0f31f994
MC
848 /* Combine status and statistics blocks into one allocation. */
849 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
f86e82fb 850 if (bp->flags & BNX2_FLAG_MSIX_CAP)
b4b36042
MC
851 status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
852 BNX2_SBLK_MSIX_ALIGN_SIZE);
0f31f994
MC
853 bp->status_stats_size = status_blk_size +
854 sizeof(struct statistics_block);
855
36227e88
SG
856 status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
857 &bp->status_blk_mapping, GFP_KERNEL);
43e80b89 858 if (status_blk == NULL)
b6016b76
MC
859 goto alloc_mem_err;
860
43e80b89 861 memset(status_blk, 0, bp->status_stats_size);
b6016b76 862
43e80b89
MC
863 bnapi = &bp->bnx2_napi[0];
864 bnapi->status_blk.msi = status_blk;
865 bnapi->hw_tx_cons_ptr =
866 &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
867 bnapi->hw_rx_cons_ptr =
868 &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
f86e82fb 869 if (bp->flags & BNX2_FLAG_MSIX_CAP) {
379b39a2 870 for (i = 1; i < bp->irq_nvecs; i++) {
43e80b89
MC
871 struct status_block_msix *sblk;
872
873 bnapi = &bp->bnx2_napi[i];
b4b36042 874
43e80b89
MC
875 sblk = (void *) (status_blk +
876 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
877 bnapi->status_blk.msix = sblk;
878 bnapi->hw_tx_cons_ptr =
879 &sblk->status_tx_quick_consumer_index;
880 bnapi->hw_rx_cons_ptr =
881 &sblk->status_rx_quick_consumer_index;
b4b36042
MC
882 bnapi->int_num = i << 24;
883 }
884 }
35efa7c1 885
43e80b89 886 bp->stats_blk = status_blk + status_blk_size;
b6016b76 887
0f31f994 888 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
b6016b76 889
59b47d8a
MC
890 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
891 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
892 if (bp->ctx_pages == 0)
893 bp->ctx_pages = 1;
894 for (i = 0; i < bp->ctx_pages; i++) {
36227e88 895 bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
59b47d8a 896 BCM_PAGE_SIZE,
36227e88
SG
897 &bp->ctx_blk_mapping[i],
898 GFP_KERNEL);
59b47d8a
MC
899 if (bp->ctx_blk[i] == NULL)
900 goto alloc_mem_err;
901 }
902 }
35e9010b 903
bb4f98ab
MC
904 err = bnx2_alloc_rx_mem(bp);
905 if (err)
906 goto alloc_mem_err;
907
35e9010b
MC
908 err = bnx2_alloc_tx_mem(bp);
909 if (err)
910 goto alloc_mem_err;
911
b6016b76
MC
912 return 0;
913
914alloc_mem_err:
915 bnx2_free_mem(bp);
916 return -ENOMEM;
917}
918
e3648b3d
MC
919static void
920bnx2_report_fw_link(struct bnx2 *bp)
921{
922 u32 fw_link_status = 0;
923
583c28e5 924 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
0d8a6571
MC
925 return;
926
e3648b3d
MC
927 if (bp->link_up) {
928 u32 bmsr;
929
930 switch (bp->line_speed) {
931 case SPEED_10:
932 if (bp->duplex == DUPLEX_HALF)
933 fw_link_status = BNX2_LINK_STATUS_10HALF;
934 else
935 fw_link_status = BNX2_LINK_STATUS_10FULL;
936 break;
937 case SPEED_100:
938 if (bp->duplex == DUPLEX_HALF)
939 fw_link_status = BNX2_LINK_STATUS_100HALF;
940 else
941 fw_link_status = BNX2_LINK_STATUS_100FULL;
942 break;
943 case SPEED_1000:
944 if (bp->duplex == DUPLEX_HALF)
945 fw_link_status = BNX2_LINK_STATUS_1000HALF;
946 else
947 fw_link_status = BNX2_LINK_STATUS_1000FULL;
948 break;
949 case SPEED_2500:
950 if (bp->duplex == DUPLEX_HALF)
951 fw_link_status = BNX2_LINK_STATUS_2500HALF;
952 else
953 fw_link_status = BNX2_LINK_STATUS_2500FULL;
954 break;
955 }
956
957 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
958
959 if (bp->autoneg) {
960 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
961
ca58c3af
MC
962 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
963 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
e3648b3d
MC
964
965 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
583c28e5 966 bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
e3648b3d
MC
967 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
968 else
969 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
970 }
971 }
972 else
973 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
974
2726d6e1 975 bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
e3648b3d
MC
976}
977
9b1084b8
MC
978static char *
979bnx2_xceiver_str(struct bnx2 *bp)
980{
807540ba 981 return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
583c28e5 982 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
807540ba 983 "Copper");
9b1084b8
MC
984}
985
b6016b76
MC
986static void
987bnx2_report_link(struct bnx2 *bp)
988{
989 if (bp->link_up) {
990 netif_carrier_on(bp->dev);
3a9c6a49
JP
991 netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
992 bnx2_xceiver_str(bp),
993 bp->line_speed,
994 bp->duplex == DUPLEX_FULL ? "full" : "half");
b6016b76
MC
995
996 if (bp->flow_ctrl) {
997 if (bp->flow_ctrl & FLOW_CTRL_RX) {
3a9c6a49 998 pr_cont(", receive ");
b6016b76 999 if (bp->flow_ctrl & FLOW_CTRL_TX)
3a9c6a49 1000 pr_cont("& transmit ");
b6016b76
MC
1001 }
1002 else {
3a9c6a49 1003 pr_cont(", transmit ");
b6016b76 1004 }
3a9c6a49 1005 pr_cont("flow control ON");
b6016b76 1006 }
3a9c6a49
JP
1007 pr_cont("\n");
1008 } else {
b6016b76 1009 netif_carrier_off(bp->dev);
3a9c6a49
JP
1010 netdev_err(bp->dev, "NIC %s Link is Down\n",
1011 bnx2_xceiver_str(bp));
b6016b76 1012 }
e3648b3d
MC
1013
1014 bnx2_report_fw_link(bp);
b6016b76
MC
1015}
1016
1017static void
1018bnx2_resolve_flow_ctrl(struct bnx2 *bp)
1019{
1020 u32 local_adv, remote_adv;
1021
1022 bp->flow_ctrl = 0;
6aa20a22 1023 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
b6016b76
MC
1024 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1025
1026 if (bp->duplex == DUPLEX_FULL) {
1027 bp->flow_ctrl = bp->req_flow_ctrl;
1028 }
1029 return;
1030 }
1031
1032 if (bp->duplex != DUPLEX_FULL) {
1033 return;
1034 }
1035
583c28e5 1036 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
5b0c76ad
MC
1037 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
1038 u32 val;
1039
1040 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1041 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
1042 bp->flow_ctrl |= FLOW_CTRL_TX;
1043 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
1044 bp->flow_ctrl |= FLOW_CTRL_RX;
1045 return;
1046 }
1047
ca58c3af
MC
1048 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1049 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
b6016b76 1050
583c28e5 1051 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
b6016b76
MC
1052 u32 new_local_adv = 0;
1053 u32 new_remote_adv = 0;
1054
1055 if (local_adv & ADVERTISE_1000XPAUSE)
1056 new_local_adv |= ADVERTISE_PAUSE_CAP;
1057 if (local_adv & ADVERTISE_1000XPSE_ASYM)
1058 new_local_adv |= ADVERTISE_PAUSE_ASYM;
1059 if (remote_adv & ADVERTISE_1000XPAUSE)
1060 new_remote_adv |= ADVERTISE_PAUSE_CAP;
1061 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
1062 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
1063
1064 local_adv = new_local_adv;
1065 remote_adv = new_remote_adv;
1066 }
1067
1068 /* See Table 28B-3 of 802.3ab-1999 spec. */
1069 if (local_adv & ADVERTISE_PAUSE_CAP) {
1070 if(local_adv & ADVERTISE_PAUSE_ASYM) {
1071 if (remote_adv & ADVERTISE_PAUSE_CAP) {
1072 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1073 }
1074 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
1075 bp->flow_ctrl = FLOW_CTRL_RX;
1076 }
1077 }
1078 else {
1079 if (remote_adv & ADVERTISE_PAUSE_CAP) {
1080 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1081 }
1082 }
1083 }
1084 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1085 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
1086 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
1087
1088 bp->flow_ctrl = FLOW_CTRL_TX;
1089 }
1090 }
1091}
1092
27a005b8
MC
1093static int
1094bnx2_5709s_linkup(struct bnx2 *bp)
1095{
1096 u32 val, speed;
1097
1098 bp->link_up = 1;
1099
1100 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1101 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1102 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1103
1104 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1105 bp->line_speed = bp->req_line_speed;
1106 bp->duplex = bp->req_duplex;
1107 return 0;
1108 }
1109 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1110 switch (speed) {
1111 case MII_BNX2_GP_TOP_AN_SPEED_10:
1112 bp->line_speed = SPEED_10;
1113 break;
1114 case MII_BNX2_GP_TOP_AN_SPEED_100:
1115 bp->line_speed = SPEED_100;
1116 break;
1117 case MII_BNX2_GP_TOP_AN_SPEED_1G:
1118 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1119 bp->line_speed = SPEED_1000;
1120 break;
1121 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1122 bp->line_speed = SPEED_2500;
1123 break;
1124 }
1125 if (val & MII_BNX2_GP_TOP_AN_FD)
1126 bp->duplex = DUPLEX_FULL;
1127 else
1128 bp->duplex = DUPLEX_HALF;
1129 return 0;
1130}
1131
b6016b76 1132static int
5b0c76ad
MC
1133bnx2_5708s_linkup(struct bnx2 *bp)
1134{
1135 u32 val;
1136
1137 bp->link_up = 1;
1138 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1139 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1140 case BCM5708S_1000X_STAT1_SPEED_10:
1141 bp->line_speed = SPEED_10;
1142 break;
1143 case BCM5708S_1000X_STAT1_SPEED_100:
1144 bp->line_speed = SPEED_100;
1145 break;
1146 case BCM5708S_1000X_STAT1_SPEED_1G:
1147 bp->line_speed = SPEED_1000;
1148 break;
1149 case BCM5708S_1000X_STAT1_SPEED_2G5:
1150 bp->line_speed = SPEED_2500;
1151 break;
1152 }
1153 if (val & BCM5708S_1000X_STAT1_FD)
1154 bp->duplex = DUPLEX_FULL;
1155 else
1156 bp->duplex = DUPLEX_HALF;
1157
1158 return 0;
1159}
1160
1161static int
1162bnx2_5706s_linkup(struct bnx2 *bp)
b6016b76
MC
1163{
1164 u32 bmcr, local_adv, remote_adv, common;
1165
1166 bp->link_up = 1;
1167 bp->line_speed = SPEED_1000;
1168
ca58c3af 1169 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76
MC
1170 if (bmcr & BMCR_FULLDPLX) {
1171 bp->duplex = DUPLEX_FULL;
1172 }
1173 else {
1174 bp->duplex = DUPLEX_HALF;
1175 }
1176
1177 if (!(bmcr & BMCR_ANENABLE)) {
1178 return 0;
1179 }
1180
ca58c3af
MC
1181 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1182 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
b6016b76
MC
1183
1184 common = local_adv & remote_adv;
1185 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1186
1187 if (common & ADVERTISE_1000XFULL) {
1188 bp->duplex = DUPLEX_FULL;
1189 }
1190 else {
1191 bp->duplex = DUPLEX_HALF;
1192 }
1193 }
1194
1195 return 0;
1196}
1197
1198static int
1199bnx2_copper_linkup(struct bnx2 *bp)
1200{
1201 u32 bmcr;
1202
ca58c3af 1203 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76
MC
1204 if (bmcr & BMCR_ANENABLE) {
1205 u32 local_adv, remote_adv, common;
1206
1207 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1208 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1209
1210 common = local_adv & (remote_adv >> 2);
1211 if (common & ADVERTISE_1000FULL) {
1212 bp->line_speed = SPEED_1000;
1213 bp->duplex = DUPLEX_FULL;
1214 }
1215 else if (common & ADVERTISE_1000HALF) {
1216 bp->line_speed = SPEED_1000;
1217 bp->duplex = DUPLEX_HALF;
1218 }
1219 else {
ca58c3af
MC
1220 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1221 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
b6016b76
MC
1222
1223 common = local_adv & remote_adv;
1224 if (common & ADVERTISE_100FULL) {
1225 bp->line_speed = SPEED_100;
1226 bp->duplex = DUPLEX_FULL;
1227 }
1228 else if (common & ADVERTISE_100HALF) {
1229 bp->line_speed = SPEED_100;
1230 bp->duplex = DUPLEX_HALF;
1231 }
1232 else if (common & ADVERTISE_10FULL) {
1233 bp->line_speed = SPEED_10;
1234 bp->duplex = DUPLEX_FULL;
1235 }
1236 else if (common & ADVERTISE_10HALF) {
1237 bp->line_speed = SPEED_10;
1238 bp->duplex = DUPLEX_HALF;
1239 }
1240 else {
1241 bp->line_speed = 0;
1242 bp->link_up = 0;
1243 }
1244 }
1245 }
1246 else {
1247 if (bmcr & BMCR_SPEED100) {
1248 bp->line_speed = SPEED_100;
1249 }
1250 else {
1251 bp->line_speed = SPEED_10;
1252 }
1253 if (bmcr & BMCR_FULLDPLX) {
1254 bp->duplex = DUPLEX_FULL;
1255 }
1256 else {
1257 bp->duplex = DUPLEX_HALF;
1258 }
1259 }
1260
1261 return 0;
1262}
1263
83e3fc89 1264static void
bb4f98ab 1265bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
83e3fc89 1266{
bb4f98ab 1267 u32 val, rx_cid_addr = GET_CID_ADDR(cid);
83e3fc89
MC
1268
1269 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1270 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1271 val |= 0x02 << 8;
1272
22fa159d
MC
1273 if (bp->flow_ctrl & FLOW_CTRL_TX)
1274 val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;
83e3fc89 1275
83e3fc89
MC
1276 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1277}
1278
bb4f98ab
MC
1279static void
1280bnx2_init_all_rx_contexts(struct bnx2 *bp)
1281{
1282 int i;
1283 u32 cid;
1284
1285 for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1286 if (i == 1)
1287 cid = RX_RSS_CID;
1288 bnx2_init_rx_context(bp, cid);
1289 }
1290}
1291
344478db 1292static void
b6016b76
MC
1293bnx2_set_mac_link(struct bnx2 *bp)
1294{
1295 u32 val;
1296
1297 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
1298 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
1299 (bp->duplex == DUPLEX_HALF)) {
1300 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
1301 }
1302
1303 /* Configure the EMAC mode register. */
1304 val = REG_RD(bp, BNX2_EMAC_MODE);
1305
1306 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
5b0c76ad 1307 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
59b47d8a 1308 BNX2_EMAC_MODE_25G_MODE);
b6016b76
MC
1309
1310 if (bp->link_up) {
5b0c76ad
MC
1311 switch (bp->line_speed) {
1312 case SPEED_10:
59b47d8a
MC
1313 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
1314 val |= BNX2_EMAC_MODE_PORT_MII_10M;
5b0c76ad
MC
1315 break;
1316 }
1317 /* fall through */
1318 case SPEED_100:
1319 val |= BNX2_EMAC_MODE_PORT_MII;
1320 break;
1321 case SPEED_2500:
59b47d8a 1322 val |= BNX2_EMAC_MODE_25G_MODE;
5b0c76ad
MC
1323 /* fall through */
1324 case SPEED_1000:
1325 val |= BNX2_EMAC_MODE_PORT_GMII;
1326 break;
1327 }
b6016b76
MC
1328 }
1329 else {
1330 val |= BNX2_EMAC_MODE_PORT_GMII;
1331 }
1332
1333 /* Set the MAC to operate in the appropriate duplex mode. */
1334 if (bp->duplex == DUPLEX_HALF)
1335 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1336 REG_WR(bp, BNX2_EMAC_MODE, val);
1337
1338 /* Enable/disable rx PAUSE. */
1339 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1340
1341 if (bp->flow_ctrl & FLOW_CTRL_RX)
1342 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1343 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1344
1345 /* Enable/disable tx PAUSE. */
1346 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
1347 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1348
1349 if (bp->flow_ctrl & FLOW_CTRL_TX)
1350 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1351 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
1352
1353 /* Acknowledge the interrupt. */
1354 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1355
22fa159d 1356 bnx2_init_all_rx_contexts(bp);
b6016b76
MC
1357}
1358
27a005b8
MC
1359static void
1360bnx2_enable_bmsr1(struct bnx2 *bp)
1361{
583c28e5 1362 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
27a005b8
MC
1363 (CHIP_NUM(bp) == CHIP_NUM_5709))
1364 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1365 MII_BNX2_BLK_ADDR_GP_STATUS);
1366}
1367
1368static void
1369bnx2_disable_bmsr1(struct bnx2 *bp)
1370{
583c28e5 1371 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
27a005b8
MC
1372 (CHIP_NUM(bp) == CHIP_NUM_5709))
1373 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1374 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1375}
1376
605a9e20
MC
1377static int
1378bnx2_test_and_enable_2g5(struct bnx2 *bp)
1379{
1380 u32 up1;
1381 int ret = 1;
1382
583c28e5 1383 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
605a9e20
MC
1384 return 0;
1385
1386 if (bp->autoneg & AUTONEG_SPEED)
1387 bp->advertising |= ADVERTISED_2500baseX_Full;
1388
27a005b8
MC
1389 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1390 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1391
605a9e20
MC
1392 bnx2_read_phy(bp, bp->mii_up1, &up1);
1393 if (!(up1 & BCM5708S_UP1_2G5)) {
1394 up1 |= BCM5708S_UP1_2G5;
1395 bnx2_write_phy(bp, bp->mii_up1, up1);
1396 ret = 0;
1397 }
1398
27a005b8
MC
1399 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1400 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1401 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1402
605a9e20
MC
1403 return ret;
1404}
1405
1406static int
1407bnx2_test_and_disable_2g5(struct bnx2 *bp)
1408{
1409 u32 up1;
1410 int ret = 0;
1411
583c28e5 1412 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
605a9e20
MC
1413 return 0;
1414
27a005b8
MC
1415 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1416 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1417
605a9e20
MC
1418 bnx2_read_phy(bp, bp->mii_up1, &up1);
1419 if (up1 & BCM5708S_UP1_2G5) {
1420 up1 &= ~BCM5708S_UP1_2G5;
1421 bnx2_write_phy(bp, bp->mii_up1, up1);
1422 ret = 1;
1423 }
1424
27a005b8
MC
1425 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1426 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1427 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1428
605a9e20
MC
1429 return ret;
1430}
1431
1432static void
1433bnx2_enable_forced_2g5(struct bnx2 *bp)
1434{
cbd6890c
MC
1435 u32 uninitialized_var(bmcr);
1436 int err;
605a9e20 1437
583c28e5 1438 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
605a9e20
MC
1439 return;
1440
27a005b8
MC
1441 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1442 u32 val;
1443
1444 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1445 MII_BNX2_BLK_ADDR_SERDES_DIG);
cbd6890c
MC
1446 if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1447 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1448 val |= MII_BNX2_SD_MISC1_FORCE |
1449 MII_BNX2_SD_MISC1_FORCE_2_5G;
1450 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1451 }
27a005b8
MC
1452
1453 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1454 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
cbd6890c 1455 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
27a005b8
MC
1456
1457 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
cbd6890c
MC
1458 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1459 if (!err)
1460 bmcr |= BCM5708S_BMCR_FORCE_2500;
c7079857
ED
1461 } else {
1462 return;
605a9e20
MC
1463 }
1464
cbd6890c
MC
1465 if (err)
1466 return;
1467
605a9e20
MC
1468 if (bp->autoneg & AUTONEG_SPEED) {
1469 bmcr &= ~BMCR_ANENABLE;
1470 if (bp->req_duplex == DUPLEX_FULL)
1471 bmcr |= BMCR_FULLDPLX;
1472 }
1473 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1474}
1475
1476static void
1477bnx2_disable_forced_2g5(struct bnx2 *bp)
1478{
cbd6890c
MC
1479 u32 uninitialized_var(bmcr);
1480 int err;
605a9e20 1481
583c28e5 1482 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
605a9e20
MC
1483 return;
1484
27a005b8
MC
1485 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1486 u32 val;
1487
1488 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1489 MII_BNX2_BLK_ADDR_SERDES_DIG);
cbd6890c
MC
1490 if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1491 val &= ~MII_BNX2_SD_MISC1_FORCE;
1492 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1493 }
27a005b8
MC
1494
1495 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1496 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
cbd6890c 1497 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
27a005b8
MC
1498
1499 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
cbd6890c
MC
1500 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1501 if (!err)
1502 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
c7079857
ED
1503 } else {
1504 return;
605a9e20
MC
1505 }
1506
cbd6890c
MC
1507 if (err)
1508 return;
1509
605a9e20
MC
1510 if (bp->autoneg & AUTONEG_SPEED)
1511 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1512 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1513}
1514
b2fadeae
MC
1515static void
1516bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1517{
1518 u32 val;
1519
1520 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1521 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1522 if (start)
1523 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1524 else
1525 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1526}
1527
b6016b76
MC
1528static int
1529bnx2_set_link(struct bnx2 *bp)
1530{
1531 u32 bmsr;
1532 u8 link_up;
1533
80be4434 1534 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
b6016b76
MC
1535 bp->link_up = 1;
1536 return 0;
1537 }
1538
583c28e5 1539 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
0d8a6571
MC
1540 return 0;
1541
b6016b76
MC
1542 link_up = bp->link_up;
1543
27a005b8
MC
1544 bnx2_enable_bmsr1(bp);
1545 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1546 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1547 bnx2_disable_bmsr1(bp);
b6016b76 1548
583c28e5 1549 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
b6016b76 1550 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
a2724e25 1551 u32 val, an_dbg;
b6016b76 1552
583c28e5 1553 if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
b2fadeae 1554 bnx2_5706s_force_link_dn(bp, 0);
583c28e5 1555 bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
b2fadeae 1556 }
b6016b76 1557 val = REG_RD(bp, BNX2_EMAC_STATUS);
a2724e25
MC
1558
1559 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
1560 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1561 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1562
1563 if ((val & BNX2_EMAC_STATUS_LINK) &&
1564 !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
b6016b76
MC
1565 bmsr |= BMSR_LSTATUS;
1566 else
1567 bmsr &= ~BMSR_LSTATUS;
1568 }
1569
1570 if (bmsr & BMSR_LSTATUS) {
1571 bp->link_up = 1;
1572
583c28e5 1573 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
5b0c76ad
MC
1574 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1575 bnx2_5706s_linkup(bp);
1576 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1577 bnx2_5708s_linkup(bp);
27a005b8
MC
1578 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1579 bnx2_5709s_linkup(bp);
b6016b76
MC
1580 }
1581 else {
1582 bnx2_copper_linkup(bp);
1583 }
1584 bnx2_resolve_flow_ctrl(bp);
1585 }
1586 else {
583c28e5 1587 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
605a9e20
MC
1588 (bp->autoneg & AUTONEG_SPEED))
1589 bnx2_disable_forced_2g5(bp);
b6016b76 1590
583c28e5 1591 if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
b2fadeae
MC
1592 u32 bmcr;
1593
1594 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1595 bmcr |= BMCR_ANENABLE;
1596 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1597
583c28e5 1598 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
b2fadeae 1599 }
b6016b76
MC
1600 bp->link_up = 0;
1601 }
1602
1603 if (bp->link_up != link_up) {
1604 bnx2_report_link(bp);
1605 }
1606
1607 bnx2_set_mac_link(bp);
1608
1609 return 0;
1610}
1611
1612static int
1613bnx2_reset_phy(struct bnx2 *bp)
1614{
1615 int i;
1616 u32 reg;
1617
ca58c3af 1618 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
b6016b76
MC
1619
1620#define PHY_RESET_MAX_WAIT 100
1621 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1622 udelay(10);
1623
ca58c3af 1624 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
b6016b76
MC
1625 if (!(reg & BMCR_RESET)) {
1626 udelay(20);
1627 break;
1628 }
1629 }
1630 if (i == PHY_RESET_MAX_WAIT) {
1631 return -EBUSY;
1632 }
1633 return 0;
1634}
1635
1636static u32
1637bnx2_phy_get_pause_adv(struct bnx2 *bp)
1638{
1639 u32 adv = 0;
1640
1641 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1642 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1643
583c28e5 1644 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
b6016b76
MC
1645 adv = ADVERTISE_1000XPAUSE;
1646 }
1647 else {
1648 adv = ADVERTISE_PAUSE_CAP;
1649 }
1650 }
1651 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
583c28e5 1652 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
b6016b76
MC
1653 adv = ADVERTISE_1000XPSE_ASYM;
1654 }
1655 else {
1656 adv = ADVERTISE_PAUSE_ASYM;
1657 }
1658 }
1659 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
583c28e5 1660 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
b6016b76
MC
1661 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1662 }
1663 else {
1664 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1665 }
1666 }
1667 return adv;
1668}
1669
a2f13890 1670static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
0d8a6571 1671
b6016b76 1672static int
0d8a6571 1673bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
52d07b1f
HH
1674__releases(&bp->phy_lock)
1675__acquires(&bp->phy_lock)
0d8a6571
MC
1676{
1677 u32 speed_arg = 0, pause_adv;
1678
1679 pause_adv = bnx2_phy_get_pause_adv(bp);
1680
1681 if (bp->autoneg & AUTONEG_SPEED) {
1682 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1683 if (bp->advertising & ADVERTISED_10baseT_Half)
1684 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1685 if (bp->advertising & ADVERTISED_10baseT_Full)
1686 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1687 if (bp->advertising & ADVERTISED_100baseT_Half)
1688 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1689 if (bp->advertising & ADVERTISED_100baseT_Full)
1690 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1691 if (bp->advertising & ADVERTISED_1000baseT_Full)
1692 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1693 if (bp->advertising & ADVERTISED_2500baseX_Full)
1694 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1695 } else {
1696 if (bp->req_line_speed == SPEED_2500)
1697 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1698 else if (bp->req_line_speed == SPEED_1000)
1699 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1700 else if (bp->req_line_speed == SPEED_100) {
1701 if (bp->req_duplex == DUPLEX_FULL)
1702 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1703 else
1704 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1705 } else if (bp->req_line_speed == SPEED_10) {
1706 if (bp->req_duplex == DUPLEX_FULL)
1707 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1708 else
1709 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1710 }
1711 }
1712
1713 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1714 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
c26736ec 1715 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
0d8a6571
MC
1716 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1717
1718 if (port == PORT_TP)
1719 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1720 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1721
2726d6e1 1722 bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
0d8a6571
MC
1723
1724 spin_unlock_bh(&bp->phy_lock);
a2f13890 1725 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
0d8a6571
MC
1726 spin_lock_bh(&bp->phy_lock);
1727
1728 return 0;
1729}
1730
1731static int
1732bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
52d07b1f
HH
1733__releases(&bp->phy_lock)
1734__acquires(&bp->phy_lock)
b6016b76 1735{
605a9e20 1736 u32 adv, bmcr;
b6016b76
MC
1737 u32 new_adv = 0;
1738
583c28e5 1739 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
807540ba 1740 return bnx2_setup_remote_phy(bp, port);
0d8a6571 1741
b6016b76
MC
1742 if (!(bp->autoneg & AUTONEG_SPEED)) {
1743 u32 new_bmcr;
5b0c76ad
MC
1744 int force_link_down = 0;
1745
605a9e20
MC
1746 if (bp->req_line_speed == SPEED_2500) {
1747 if (!bnx2_test_and_enable_2g5(bp))
1748 force_link_down = 1;
1749 } else if (bp->req_line_speed == SPEED_1000) {
1750 if (bnx2_test_and_disable_2g5(bp))
1751 force_link_down = 1;
1752 }
ca58c3af 1753 bnx2_read_phy(bp, bp->mii_adv, &adv);
80be4434
MC
1754 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1755
ca58c3af 1756 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
605a9e20 1757 new_bmcr = bmcr & ~BMCR_ANENABLE;
80be4434 1758 new_bmcr |= BMCR_SPEED1000;
605a9e20 1759
27a005b8
MC
1760 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1761 if (bp->req_line_speed == SPEED_2500)
1762 bnx2_enable_forced_2g5(bp);
1763 else if (bp->req_line_speed == SPEED_1000) {
1764 bnx2_disable_forced_2g5(bp);
1765 new_bmcr &= ~0x2000;
1766 }
1767
1768 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
605a9e20
MC
1769 if (bp->req_line_speed == SPEED_2500)
1770 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1771 else
1772 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
5b0c76ad
MC
1773 }
1774
b6016b76 1775 if (bp->req_duplex == DUPLEX_FULL) {
5b0c76ad 1776 adv |= ADVERTISE_1000XFULL;
b6016b76
MC
1777 new_bmcr |= BMCR_FULLDPLX;
1778 }
1779 else {
5b0c76ad 1780 adv |= ADVERTISE_1000XHALF;
b6016b76
MC
1781 new_bmcr &= ~BMCR_FULLDPLX;
1782 }
5b0c76ad 1783 if ((new_bmcr != bmcr) || (force_link_down)) {
b6016b76
MC
1784 /* Force a link down visible on the other side */
1785 if (bp->link_up) {
ca58c3af 1786 bnx2_write_phy(bp, bp->mii_adv, adv &
5b0c76ad
MC
1787 ~(ADVERTISE_1000XFULL |
1788 ADVERTISE_1000XHALF));
ca58c3af 1789 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
b6016b76
MC
1790 BMCR_ANRESTART | BMCR_ANENABLE);
1791
1792 bp->link_up = 0;
1793 netif_carrier_off(bp->dev);
ca58c3af 1794 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
80be4434 1795 bnx2_report_link(bp);
b6016b76 1796 }
ca58c3af
MC
1797 bnx2_write_phy(bp, bp->mii_adv, adv);
1798 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
605a9e20
MC
1799 } else {
1800 bnx2_resolve_flow_ctrl(bp);
1801 bnx2_set_mac_link(bp);
b6016b76
MC
1802 }
1803 return 0;
1804 }
1805
605a9e20 1806 bnx2_test_and_enable_2g5(bp);
5b0c76ad 1807
b6016b76
MC
1808 if (bp->advertising & ADVERTISED_1000baseT_Full)
1809 new_adv |= ADVERTISE_1000XFULL;
1810
1811 new_adv |= bnx2_phy_get_pause_adv(bp);
1812
ca58c3af
MC
1813 bnx2_read_phy(bp, bp->mii_adv, &adv);
1814 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76
MC
1815
1816 bp->serdes_an_pending = 0;
1817 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1818 /* Force a link down visible on the other side */
1819 if (bp->link_up) {
ca58c3af 1820 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
80be4434
MC
1821 spin_unlock_bh(&bp->phy_lock);
1822 msleep(20);
1823 spin_lock_bh(&bp->phy_lock);
b6016b76
MC
1824 }
1825
ca58c3af
MC
1826 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1827 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
b6016b76 1828 BMCR_ANENABLE);
f8dd064e
MC
1829 /* Speed up link-up time when the link partner
1830 * does not autonegotiate which is very common
1831 * in blade servers. Some blade servers use
1832 * IPMI for kerboard input and it's important
1833 * to minimize link disruptions. Autoneg. involves
1834 * exchanging base pages plus 3 next pages and
1835 * normally completes in about 120 msec.
1836 */
40105c0b 1837 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
f8dd064e
MC
1838 bp->serdes_an_pending = 1;
1839 mod_timer(&bp->timer, jiffies + bp->current_interval);
605a9e20
MC
1840 } else {
1841 bnx2_resolve_flow_ctrl(bp);
1842 bnx2_set_mac_link(bp);
b6016b76
MC
1843 }
1844
1845 return 0;
1846}
1847
1848#define ETHTOOL_ALL_FIBRE_SPEED \
583c28e5 1849 (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ? \
deaf391b
MC
1850 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1851 (ADVERTISED_1000baseT_Full)
b6016b76
MC
1852
1853#define ETHTOOL_ALL_COPPER_SPEED \
1854 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1855 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1856 ADVERTISED_1000baseT_Full)
1857
1858#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1859 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
6aa20a22 1860
b6016b76
MC
1861#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1862
0d8a6571
MC
1863static void
1864bnx2_set_default_remote_link(struct bnx2 *bp)
1865{
1866 u32 link;
1867
1868 if (bp->phy_port == PORT_TP)
2726d6e1 1869 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
0d8a6571 1870 else
2726d6e1 1871 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
0d8a6571
MC
1872
1873 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1874 bp->req_line_speed = 0;
1875 bp->autoneg |= AUTONEG_SPEED;
1876 bp->advertising = ADVERTISED_Autoneg;
1877 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1878 bp->advertising |= ADVERTISED_10baseT_Half;
1879 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1880 bp->advertising |= ADVERTISED_10baseT_Full;
1881 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1882 bp->advertising |= ADVERTISED_100baseT_Half;
1883 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1884 bp->advertising |= ADVERTISED_100baseT_Full;
1885 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1886 bp->advertising |= ADVERTISED_1000baseT_Full;
1887 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1888 bp->advertising |= ADVERTISED_2500baseX_Full;
1889 } else {
1890 bp->autoneg = 0;
1891 bp->advertising = 0;
1892 bp->req_duplex = DUPLEX_FULL;
1893 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1894 bp->req_line_speed = SPEED_10;
1895 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1896 bp->req_duplex = DUPLEX_HALF;
1897 }
1898 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1899 bp->req_line_speed = SPEED_100;
1900 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1901 bp->req_duplex = DUPLEX_HALF;
1902 }
1903 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1904 bp->req_line_speed = SPEED_1000;
1905 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1906 bp->req_line_speed = SPEED_2500;
1907 }
1908}
1909
deaf391b
MC
1910static void
1911bnx2_set_default_link(struct bnx2 *bp)
1912{
ab59859d
HH
1913 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1914 bnx2_set_default_remote_link(bp);
1915 return;
1916 }
0d8a6571 1917
deaf391b
MC
1918 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1919 bp->req_line_speed = 0;
583c28e5 1920 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
deaf391b
MC
1921 u32 reg;
1922
1923 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1924
2726d6e1 1925 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
deaf391b
MC
1926 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1927 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1928 bp->autoneg = 0;
1929 bp->req_line_speed = bp->line_speed = SPEED_1000;
1930 bp->req_duplex = DUPLEX_FULL;
1931 }
1932 } else
1933 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1934}
1935
df149d70
MC
1936static void
1937bnx2_send_heart_beat(struct bnx2 *bp)
1938{
1939 u32 msg;
1940 u32 addr;
1941
1942 spin_lock(&bp->indirect_lock);
1943 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1944 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1945 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1946 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1947 spin_unlock(&bp->indirect_lock);
1948}
1949
0d8a6571
MC
1950static void
1951bnx2_remote_phy_event(struct bnx2 *bp)
1952{
1953 u32 msg;
1954 u8 link_up = bp->link_up;
1955 u8 old_port;
1956
2726d6e1 1957 msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
0d8a6571 1958
df149d70
MC
1959 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1960 bnx2_send_heart_beat(bp);
1961
1962 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1963
0d8a6571
MC
1964 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1965 bp->link_up = 0;
1966 else {
1967 u32 speed;
1968
1969 bp->link_up = 1;
1970 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1971 bp->duplex = DUPLEX_FULL;
1972 switch (speed) {
1973 case BNX2_LINK_STATUS_10HALF:
1974 bp->duplex = DUPLEX_HALF;
1975 case BNX2_LINK_STATUS_10FULL:
1976 bp->line_speed = SPEED_10;
1977 break;
1978 case BNX2_LINK_STATUS_100HALF:
1979 bp->duplex = DUPLEX_HALF;
1980 case BNX2_LINK_STATUS_100BASE_T4:
1981 case BNX2_LINK_STATUS_100FULL:
1982 bp->line_speed = SPEED_100;
1983 break;
1984 case BNX2_LINK_STATUS_1000HALF:
1985 bp->duplex = DUPLEX_HALF;
1986 case BNX2_LINK_STATUS_1000FULL:
1987 bp->line_speed = SPEED_1000;
1988 break;
1989 case BNX2_LINK_STATUS_2500HALF:
1990 bp->duplex = DUPLEX_HALF;
1991 case BNX2_LINK_STATUS_2500FULL:
1992 bp->line_speed = SPEED_2500;
1993 break;
1994 default:
1995 bp->line_speed = 0;
1996 break;
1997 }
1998
0d8a6571
MC
1999 bp->flow_ctrl = 0;
2000 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
2001 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
2002 if (bp->duplex == DUPLEX_FULL)
2003 bp->flow_ctrl = bp->req_flow_ctrl;
2004 } else {
2005 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
2006 bp->flow_ctrl |= FLOW_CTRL_TX;
2007 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
2008 bp->flow_ctrl |= FLOW_CTRL_RX;
2009 }
2010
2011 old_port = bp->phy_port;
2012 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
2013 bp->phy_port = PORT_FIBRE;
2014 else
2015 bp->phy_port = PORT_TP;
2016
2017 if (old_port != bp->phy_port)
2018 bnx2_set_default_link(bp);
2019
0d8a6571
MC
2020 }
2021 if (bp->link_up != link_up)
2022 bnx2_report_link(bp);
2023
2024 bnx2_set_mac_link(bp);
2025}
2026
2027static int
2028bnx2_set_remote_link(struct bnx2 *bp)
2029{
2030 u32 evt_code;
2031
2726d6e1 2032 evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
0d8a6571
MC
2033 switch (evt_code) {
2034 case BNX2_FW_EVT_CODE_LINK_EVENT:
2035 bnx2_remote_phy_event(bp);
2036 break;
2037 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2038 default:
df149d70 2039 bnx2_send_heart_beat(bp);
0d8a6571
MC
2040 break;
2041 }
2042 return 0;
2043}
2044
b6016b76
MC
2045static int
2046bnx2_setup_copper_phy(struct bnx2 *bp)
52d07b1f
HH
2047__releases(&bp->phy_lock)
2048__acquires(&bp->phy_lock)
b6016b76
MC
2049{
2050 u32 bmcr;
2051 u32 new_bmcr;
2052
ca58c3af 2053 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76
MC
2054
2055 if (bp->autoneg & AUTONEG_SPEED) {
2056 u32 adv_reg, adv1000_reg;
2057 u32 new_adv_reg = 0;
2058 u32 new_adv1000_reg = 0;
2059
ca58c3af 2060 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
b6016b76
MC
2061 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
2062 ADVERTISE_PAUSE_ASYM);
2063
2064 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
2065 adv1000_reg &= PHY_ALL_1000_SPEED;
2066
28011cf1 2067 new_adv_reg = ethtool_adv_to_mii_100bt(bp->advertising);
b6016b76 2068 new_adv_reg |= ADVERTISE_CSMA;
b6016b76
MC
2069 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
2070
28011cf1
MC
2071 new_adv1000_reg |= ethtool_adv_to_mii_1000T(bp->advertising);
2072
b6016b76
MC
2073 if ((adv1000_reg != new_adv1000_reg) ||
2074 (adv_reg != new_adv_reg) ||
2075 ((bmcr & BMCR_ANENABLE) == 0)) {
2076
ca58c3af 2077 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
b6016b76 2078 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
ca58c3af 2079 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
b6016b76
MC
2080 BMCR_ANENABLE);
2081 }
2082 else if (bp->link_up) {
2083 /* Flow ctrl may have changed from auto to forced */
2084 /* or vice-versa. */
2085
2086 bnx2_resolve_flow_ctrl(bp);
2087 bnx2_set_mac_link(bp);
2088 }
2089 return 0;
2090 }
2091
2092 new_bmcr = 0;
2093 if (bp->req_line_speed == SPEED_100) {
2094 new_bmcr |= BMCR_SPEED100;
2095 }
2096 if (bp->req_duplex == DUPLEX_FULL) {
2097 new_bmcr |= BMCR_FULLDPLX;
2098 }
2099 if (new_bmcr != bmcr) {
2100 u32 bmsr;
b6016b76 2101
ca58c3af
MC
2102 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2103 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
6aa20a22 2104
b6016b76
MC
2105 if (bmsr & BMSR_LSTATUS) {
2106 /* Force link down */
ca58c3af 2107 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
a16dda0e
MC
2108 spin_unlock_bh(&bp->phy_lock);
2109 msleep(50);
2110 spin_lock_bh(&bp->phy_lock);
2111
ca58c3af
MC
2112 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2113 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
b6016b76
MC
2114 }
2115
ca58c3af 2116 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
b6016b76
MC
2117
2118 /* Normally, the new speed is setup after the link has
2119 * gone down and up again. In some cases, link will not go
2120 * down so we need to set up the new speed here.
2121 */
2122 if (bmsr & BMSR_LSTATUS) {
2123 bp->line_speed = bp->req_line_speed;
2124 bp->duplex = bp->req_duplex;
2125 bnx2_resolve_flow_ctrl(bp);
2126 bnx2_set_mac_link(bp);
2127 }
27a005b8
MC
2128 } else {
2129 bnx2_resolve_flow_ctrl(bp);
2130 bnx2_set_mac_link(bp);
b6016b76
MC
2131 }
2132 return 0;
2133}
2134
2135static int
0d8a6571 2136bnx2_setup_phy(struct bnx2 *bp, u8 port)
52d07b1f
HH
2137__releases(&bp->phy_lock)
2138__acquires(&bp->phy_lock)
b6016b76
MC
2139{
2140 if (bp->loopback == MAC_LOOPBACK)
2141 return 0;
2142
583c28e5 2143 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
807540ba 2144 return bnx2_setup_serdes_phy(bp, port);
b6016b76
MC
2145 }
2146 else {
807540ba 2147 return bnx2_setup_copper_phy(bp);
b6016b76
MC
2148 }
2149}
2150
27a005b8 2151static int
9a120bc5 2152bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
27a005b8
MC
2153{
2154 u32 val;
2155
2156 bp->mii_bmcr = MII_BMCR + 0x10;
2157 bp->mii_bmsr = MII_BMSR + 0x10;
2158 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2159 bp->mii_adv = MII_ADVERTISE + 0x10;
2160 bp->mii_lpa = MII_LPA + 0x10;
2161 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2162
2163 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2164 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2165
2166 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
9a120bc5
MC
2167 if (reset_phy)
2168 bnx2_reset_phy(bp);
27a005b8
MC
2169
2170 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2171
2172 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2173 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2174 val |= MII_BNX2_SD_1000XCTL1_FIBER;
2175 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2176
2177 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2178 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
583c28e5 2179 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
27a005b8
MC
2180 val |= BCM5708S_UP1_2G5;
2181 else
2182 val &= ~BCM5708S_UP1_2G5;
2183 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2184
2185 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2186 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2187 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2188 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2189
2190 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2191
2192 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2193 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2194 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2195
2196 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2197
2198 return 0;
2199}
2200
b6016b76 2201static int
9a120bc5 2202bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
5b0c76ad
MC
2203{
2204 u32 val;
2205
9a120bc5
MC
2206 if (reset_phy)
2207 bnx2_reset_phy(bp);
27a005b8
MC
2208
2209 bp->mii_up1 = BCM5708S_UP1;
2210
5b0c76ad
MC
2211 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2212 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2213 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2214
2215 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2216 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2217 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2218
2219 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2220 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2221 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2222
583c28e5 2223 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
5b0c76ad
MC
2224 bnx2_read_phy(bp, BCM5708S_UP1, &val);
2225 val |= BCM5708S_UP1_2G5;
2226 bnx2_write_phy(bp, BCM5708S_UP1, val);
2227 }
2228
2229 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
dda1e390
MC
2230 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
2231 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
5b0c76ad
MC
2232 /* increase tx signal amplitude */
2233 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2234 BCM5708S_BLK_ADDR_TX_MISC);
2235 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2236 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2237 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2238 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2239 }
2240
2726d6e1 2241 val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
5b0c76ad
MC
2242 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2243
2244 if (val) {
2245 u32 is_backplane;
2246
2726d6e1 2247 is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
5b0c76ad
MC
2248 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2249 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2250 BCM5708S_BLK_ADDR_TX_MISC);
2251 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2252 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2253 BCM5708S_BLK_ADDR_DIG);
2254 }
2255 }
2256 return 0;
2257}
2258
2259static int
9a120bc5 2260bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
b6016b76 2261{
9a120bc5
MC
2262 if (reset_phy)
2263 bnx2_reset_phy(bp);
27a005b8 2264
583c28e5 2265 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
b6016b76 2266
59b47d8a
MC
2267 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2268 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
b6016b76
MC
2269
2270 if (bp->dev->mtu > 1500) {
2271 u32 val;
2272
2273 /* Set extended packet length bit */
2274 bnx2_write_phy(bp, 0x18, 0x7);
2275 bnx2_read_phy(bp, 0x18, &val);
2276 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2277
2278 bnx2_write_phy(bp, 0x1c, 0x6c00);
2279 bnx2_read_phy(bp, 0x1c, &val);
2280 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2281 }
2282 else {
2283 u32 val;
2284
2285 bnx2_write_phy(bp, 0x18, 0x7);
2286 bnx2_read_phy(bp, 0x18, &val);
2287 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2288
2289 bnx2_write_phy(bp, 0x1c, 0x6c00);
2290 bnx2_read_phy(bp, 0x1c, &val);
2291 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2292 }
2293
2294 return 0;
2295}
2296
2297static int
9a120bc5 2298bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
b6016b76 2299{
5b0c76ad
MC
2300 u32 val;
2301
9a120bc5
MC
2302 if (reset_phy)
2303 bnx2_reset_phy(bp);
27a005b8 2304
583c28e5 2305 if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
b6016b76
MC
2306 bnx2_write_phy(bp, 0x18, 0x0c00);
2307 bnx2_write_phy(bp, 0x17, 0x000a);
2308 bnx2_write_phy(bp, 0x15, 0x310b);
2309 bnx2_write_phy(bp, 0x17, 0x201f);
2310 bnx2_write_phy(bp, 0x15, 0x9506);
2311 bnx2_write_phy(bp, 0x17, 0x401f);
2312 bnx2_write_phy(bp, 0x15, 0x14e2);
2313 bnx2_write_phy(bp, 0x18, 0x0400);
2314 }
2315
583c28e5 2316 if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
b659f44e
MC
2317 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2318 MII_BNX2_DSP_EXPAND_REG | 0x8);
2319 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2320 val &= ~(1 << 8);
2321 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2322 }
2323
b6016b76 2324 if (bp->dev->mtu > 1500) {
b6016b76
MC
2325 /* Set extended packet length bit */
2326 bnx2_write_phy(bp, 0x18, 0x7);
2327 bnx2_read_phy(bp, 0x18, &val);
2328 bnx2_write_phy(bp, 0x18, val | 0x4000);
2329
2330 bnx2_read_phy(bp, 0x10, &val);
2331 bnx2_write_phy(bp, 0x10, val | 0x1);
2332 }
2333 else {
b6016b76
MC
2334 bnx2_write_phy(bp, 0x18, 0x7);
2335 bnx2_read_phy(bp, 0x18, &val);
2336 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2337
2338 bnx2_read_phy(bp, 0x10, &val);
2339 bnx2_write_phy(bp, 0x10, val & ~0x1);
2340 }
2341
5b0c76ad
MC
2342 /* ethernet@wirespeed */
2343 bnx2_write_phy(bp, 0x18, 0x7007);
2344 bnx2_read_phy(bp, 0x18, &val);
2345 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
b6016b76
MC
2346 return 0;
2347}
2348
2349
2350static int
9a120bc5 2351bnx2_init_phy(struct bnx2 *bp, int reset_phy)
52d07b1f
HH
2352__releases(&bp->phy_lock)
2353__acquires(&bp->phy_lock)
b6016b76
MC
2354{
2355 u32 val;
2356 int rc = 0;
2357
583c28e5
MC
2358 bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2359 bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
b6016b76 2360
ca58c3af
MC
2361 bp->mii_bmcr = MII_BMCR;
2362 bp->mii_bmsr = MII_BMSR;
27a005b8 2363 bp->mii_bmsr1 = MII_BMSR;
ca58c3af
MC
2364 bp->mii_adv = MII_ADVERTISE;
2365 bp->mii_lpa = MII_LPA;
2366
b6016b76
MC
2367 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2368
583c28e5 2369 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
0d8a6571
MC
2370 goto setup_phy;
2371
b6016b76
MC
2372 bnx2_read_phy(bp, MII_PHYSID1, &val);
2373 bp->phy_id = val << 16;
2374 bnx2_read_phy(bp, MII_PHYSID2, &val);
2375 bp->phy_id |= val & 0xffff;
2376
583c28e5 2377 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
5b0c76ad 2378 if (CHIP_NUM(bp) == CHIP_NUM_5706)
9a120bc5 2379 rc = bnx2_init_5706s_phy(bp, reset_phy);
5b0c76ad 2380 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
9a120bc5 2381 rc = bnx2_init_5708s_phy(bp, reset_phy);
27a005b8 2382 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
9a120bc5 2383 rc = bnx2_init_5709s_phy(bp, reset_phy);
b6016b76
MC
2384 }
2385 else {
9a120bc5 2386 rc = bnx2_init_copper_phy(bp, reset_phy);
b6016b76
MC
2387 }
2388
0d8a6571
MC
2389setup_phy:
2390 if (!rc)
2391 rc = bnx2_setup_phy(bp, bp->phy_port);
b6016b76
MC
2392
2393 return rc;
2394}
2395
2396static int
2397bnx2_set_mac_loopback(struct bnx2 *bp)
2398{
2399 u32 mac_mode;
2400
2401 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2402 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2403 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2404 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2405 bp->link_up = 1;
2406 return 0;
2407}
2408
bc5a0690
MC
2409static int bnx2_test_link(struct bnx2 *);
2410
2411static int
2412bnx2_set_phy_loopback(struct bnx2 *bp)
2413{
2414 u32 mac_mode;
2415 int rc, i;
2416
2417 spin_lock_bh(&bp->phy_lock);
ca58c3af 2418 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
bc5a0690
MC
2419 BMCR_SPEED1000);
2420 spin_unlock_bh(&bp->phy_lock);
2421 if (rc)
2422 return rc;
2423
2424 for (i = 0; i < 10; i++) {
2425 if (bnx2_test_link(bp) == 0)
2426 break;
80be4434 2427 msleep(100);
bc5a0690
MC
2428 }
2429
2430 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2431 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2432 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
59b47d8a 2433 BNX2_EMAC_MODE_25G_MODE);
bc5a0690
MC
2434
2435 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2436 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2437 bp->link_up = 1;
2438 return 0;
2439}
2440
ecdbf6e0
JH
2441static void
2442bnx2_dump_mcp_state(struct bnx2 *bp)
2443{
2444 struct net_device *dev = bp->dev;
2445 u32 mcp_p0, mcp_p1;
2446
2447 netdev_err(dev, "<--- start MCP states dump --->\n");
2448 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
2449 mcp_p0 = BNX2_MCP_STATE_P0;
2450 mcp_p1 = BNX2_MCP_STATE_P1;
2451 } else {
2452 mcp_p0 = BNX2_MCP_STATE_P0_5708;
2453 mcp_p1 = BNX2_MCP_STATE_P1_5708;
2454 }
2455 netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
2456 bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
2457 netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
2458 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
2459 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
2460 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
2461 netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
2462 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2463 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2464 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
2465 netdev_err(dev, "DEBUG: shmem states:\n");
2466 netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
2467 bnx2_shmem_rd(bp, BNX2_DRV_MB),
2468 bnx2_shmem_rd(bp, BNX2_FW_MB),
2469 bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
2470 pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
2471 netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
2472 bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
2473 bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
2474 pr_cont(" condition[%08x]\n",
2475 bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
2476 DP_SHMEM_LINE(bp, 0x3cc);
2477 DP_SHMEM_LINE(bp, 0x3dc);
2478 DP_SHMEM_LINE(bp, 0x3ec);
2479 netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
2480 netdev_err(dev, "<--- end MCP states dump --->\n");
2481}
2482
b6016b76 2483static int
a2f13890 2484bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
b6016b76
MC
2485{
2486 int i;
2487 u32 val;
2488
b6016b76
MC
2489 bp->fw_wr_seq++;
2490 msg_data |= bp->fw_wr_seq;
2491
2726d6e1 2492 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
b6016b76 2493
a2f13890
MC
2494 if (!ack)
2495 return 0;
2496
b6016b76 2497 /* wait for an acknowledgement. */
40105c0b 2498 for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
b090ae2b 2499 msleep(10);
b6016b76 2500
2726d6e1 2501 val = bnx2_shmem_rd(bp, BNX2_FW_MB);
b6016b76
MC
2502
2503 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2504 break;
2505 }
b090ae2b
MC
2506 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2507 return 0;
b6016b76
MC
2508
2509 /* If we timed out, inform the firmware that this is the case. */
b090ae2b 2510 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
b6016b76
MC
2511 msg_data &= ~BNX2_DRV_MSG_CODE;
2512 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2513
2726d6e1 2514 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
ecdbf6e0
JH
2515 if (!silent) {
2516 pr_err("fw sync timeout, reset code = %x\n", msg_data);
2517 bnx2_dump_mcp_state(bp);
2518 }
b6016b76 2519
b6016b76
MC
2520 return -EBUSY;
2521 }
2522
b090ae2b
MC
2523 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2524 return -EIO;
2525
b6016b76
MC
2526 return 0;
2527}
2528
59b47d8a
MC
2529static int
2530bnx2_init_5709_context(struct bnx2 *bp)
2531{
2532 int i, ret = 0;
2533 u32 val;
2534
2535 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2536 val |= (BCM_PAGE_BITS - 8) << 16;
2537 REG_WR(bp, BNX2_CTX_COMMAND, val);
641bdcd5
MC
2538 for (i = 0; i < 10; i++) {
2539 val = REG_RD(bp, BNX2_CTX_COMMAND);
2540 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2541 break;
2542 udelay(2);
2543 }
2544 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2545 return -EBUSY;
2546
59b47d8a
MC
2547 for (i = 0; i < bp->ctx_pages; i++) {
2548 int j;
2549
352f7687
MC
2550 if (bp->ctx_blk[i])
2551 memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
2552 else
2553 return -ENOMEM;
2554
59b47d8a
MC
2555 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2556 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2557 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2558 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2559 (u64) bp->ctx_blk_mapping[i] >> 32);
2560 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2561 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2562 for (j = 0; j < 10; j++) {
2563
2564 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2565 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2566 break;
2567 udelay(5);
2568 }
2569 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2570 ret = -EBUSY;
2571 break;
2572 }
2573 }
2574 return ret;
2575}
2576
b6016b76
MC
2577static void
2578bnx2_init_context(struct bnx2 *bp)
2579{
2580 u32 vcid;
2581
2582 vcid = 96;
2583 while (vcid) {
2584 u32 vcid_addr, pcid_addr, offset;
7947b20e 2585 int i;
b6016b76
MC
2586
2587 vcid--;
2588
2589 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2590 u32 new_vcid;
2591
2592 vcid_addr = GET_PCID_ADDR(vcid);
2593 if (vcid & 0x8) {
2594 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2595 }
2596 else {
2597 new_vcid = vcid;
2598 }
2599 pcid_addr = GET_PCID_ADDR(new_vcid);
2600 }
2601 else {
2602 vcid_addr = GET_CID_ADDR(vcid);
2603 pcid_addr = vcid_addr;
2604 }
2605
7947b20e
MC
2606 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2607 vcid_addr += (i << PHY_CTX_SHIFT);
2608 pcid_addr += (i << PHY_CTX_SHIFT);
b6016b76 2609
5d5d0015 2610 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
7947b20e 2611 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
b6016b76 2612
7947b20e
MC
2613 /* Zero out the context. */
2614 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
62a8313c 2615 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
7947b20e 2616 }
b6016b76
MC
2617 }
2618}
2619
2620static int
2621bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2622{
2623 u16 *good_mbuf;
2624 u32 good_mbuf_cnt;
2625 u32 val;
2626
2627 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2628 if (good_mbuf == NULL) {
3a9c6a49 2629 pr_err("Failed to allocate memory in %s\n", __func__);
b6016b76
MC
2630 return -ENOMEM;
2631 }
2632
2633 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2634 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2635
2636 good_mbuf_cnt = 0;
2637
2638 /* Allocate a bunch of mbufs and save the good ones in an array. */
2726d6e1 2639 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
b6016b76 2640 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2726d6e1
MC
2641 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2642 BNX2_RBUF_COMMAND_ALLOC_REQ);
b6016b76 2643
2726d6e1 2644 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
b6016b76
MC
2645
2646 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2647
2648 /* The addresses with Bit 9 set are bad memory blocks. */
2649 if (!(val & (1 << 9))) {
2650 good_mbuf[good_mbuf_cnt] = (u16) val;
2651 good_mbuf_cnt++;
2652 }
2653
2726d6e1 2654 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
b6016b76
MC
2655 }
2656
2657 /* Free the good ones back to the mbuf pool thus discarding
2658 * all the bad ones. */
2659 while (good_mbuf_cnt) {
2660 good_mbuf_cnt--;
2661
2662 val = good_mbuf[good_mbuf_cnt];
2663 val = (val << 9) | val | 1;
2664
2726d6e1 2665 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
b6016b76
MC
2666 }
2667 kfree(good_mbuf);
2668 return 0;
2669}
2670
2671static void
5fcaed01 2672bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
b6016b76
MC
2673{
2674 u32 val;
b6016b76
MC
2675
2676 val = (mac_addr[0] << 8) | mac_addr[1];
2677
5fcaed01 2678 REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
b6016b76 2679
6aa20a22 2680 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
b6016b76
MC
2681 (mac_addr[4] << 8) | mac_addr[5];
2682
5fcaed01 2683 REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
b6016b76
MC
2684}
2685
47bf4246 2686static inline int
a2df00aa 2687bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
47bf4246
MC
2688{
2689 dma_addr_t mapping;
bb4f98ab 2690 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
47bf4246 2691 struct rx_bd *rxbd =
bb4f98ab 2692 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
a2df00aa 2693 struct page *page = alloc_page(gfp);
47bf4246
MC
2694
2695 if (!page)
2696 return -ENOMEM;
36227e88 2697 mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
47bf4246 2698 PCI_DMA_FROMDEVICE);
36227e88 2699 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
3d16af86
BL
2700 __free_page(page);
2701 return -EIO;
2702 }
2703
47bf4246 2704 rx_pg->page = page;
1a4ccc2d 2705 dma_unmap_addr_set(rx_pg, mapping, mapping);
47bf4246
MC
2706 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2707 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2708 return 0;
2709}
2710
2711static void
bb4f98ab 2712bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
47bf4246 2713{
bb4f98ab 2714 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
47bf4246
MC
2715 struct page *page = rx_pg->page;
2716
2717 if (!page)
2718 return;
2719
36227e88
SG
2720 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2721 PAGE_SIZE, PCI_DMA_FROMDEVICE);
47bf4246
MC
2722
2723 __free_page(page);
2724 rx_pg->page = NULL;
2725}
2726
b6016b76 2727static inline int
a2df00aa 2728bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
b6016b76
MC
2729{
2730 struct sk_buff *skb;
bb4f98ab 2731 struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
b6016b76 2732 dma_addr_t mapping;
bb4f98ab 2733 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
b6016b76
MC
2734 unsigned long align;
2735
a2df00aa 2736 skb = __netdev_alloc_skb(bp->dev, bp->rx_buf_size, gfp);
b6016b76
MC
2737 if (skb == NULL) {
2738 return -ENOMEM;
2739 }
2740
59b47d8a
MC
2741 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2742 skb_reserve(skb, BNX2_RX_ALIGN - align);
b6016b76 2743
36227e88
SG
2744 mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_use_size,
2745 PCI_DMA_FROMDEVICE);
2746 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
3d16af86
BL
2747 dev_kfree_skb(skb);
2748 return -EIO;
2749 }
b6016b76
MC
2750
2751 rx_buf->skb = skb;
a33fa66b 2752 rx_buf->desc = (struct l2_fhdr *) skb->data;
1a4ccc2d 2753 dma_unmap_addr_set(rx_buf, mapping, mapping);
b6016b76
MC
2754
2755 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2756 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2757
bb4f98ab 2758 rxr->rx_prod_bseq += bp->rx_buf_use_size;
b6016b76
MC
2759
2760 return 0;
2761}
2762
da3e4fbe 2763static int
35efa7c1 2764bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
b6016b76 2765{
43e80b89 2766 struct status_block *sblk = bnapi->status_blk.msi;
b6016b76 2767 u32 new_link_state, old_link_state;
da3e4fbe 2768 int is_set = 1;
b6016b76 2769
da3e4fbe
MC
2770 new_link_state = sblk->status_attn_bits & event;
2771 old_link_state = sblk->status_attn_bits_ack & event;
b6016b76 2772 if (new_link_state != old_link_state) {
da3e4fbe
MC
2773 if (new_link_state)
2774 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2775 else
2776 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2777 } else
2778 is_set = 0;
2779
2780 return is_set;
2781}
2782
2783static void
35efa7c1 2784bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
da3e4fbe 2785{
74ecc62d
MC
2786 spin_lock(&bp->phy_lock);
2787
2788 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
b6016b76 2789 bnx2_set_link(bp);
35efa7c1 2790 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
0d8a6571
MC
2791 bnx2_set_remote_link(bp);
2792
74ecc62d
MC
2793 spin_unlock(&bp->phy_lock);
2794
b6016b76
MC
2795}
2796
ead7270b 2797static inline u16
35efa7c1 2798bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
ead7270b
MC
2799{
2800 u16 cons;
2801
43e80b89
MC
2802 /* Tell compiler that status block fields can change. */
2803 barrier();
2804 cons = *bnapi->hw_tx_cons_ptr;
581daf7e 2805 barrier();
ead7270b
MC
2806 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2807 cons++;
2808 return cons;
2809}
2810
57851d84
MC
2811static int
2812bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
b6016b76 2813{
35e9010b 2814 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
b6016b76 2815 u16 hw_cons, sw_cons, sw_ring_cons;
706bf240
BL
2816 int tx_pkt = 0, index;
2817 struct netdev_queue *txq;
2818
2819 index = (bnapi - bp->bnx2_napi);
2820 txq = netdev_get_tx_queue(bp->dev, index);
b6016b76 2821
35efa7c1 2822 hw_cons = bnx2_get_hw_tx_cons(bnapi);
35e9010b 2823 sw_cons = txr->tx_cons;
b6016b76
MC
2824
2825 while (sw_cons != hw_cons) {
3d16af86 2826 struct sw_tx_bd *tx_buf;
b6016b76
MC
2827 struct sk_buff *skb;
2828 int i, last;
2829
2830 sw_ring_cons = TX_RING_IDX(sw_cons);
2831
35e9010b 2832 tx_buf = &txr->tx_buf_ring[sw_ring_cons];
b6016b76 2833 skb = tx_buf->skb;
1d39ed56 2834
d62fda08
ED
2835 /* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
2836 prefetch(&skb->end);
2837
b6016b76 2838 /* partial BD completions possible with TSO packets */
d62fda08 2839 if (tx_buf->is_gso) {
b6016b76
MC
2840 u16 last_idx, last_ring_idx;
2841
d62fda08
ED
2842 last_idx = sw_cons + tx_buf->nr_frags + 1;
2843 last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
b6016b76
MC
2844 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2845 last_idx++;
2846 }
2847 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2848 break;
2849 }
2850 }
1d39ed56 2851
36227e88 2852 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
e95524a7 2853 skb_headlen(skb), PCI_DMA_TODEVICE);
b6016b76
MC
2854
2855 tx_buf->skb = NULL;
d62fda08 2856 last = tx_buf->nr_frags;
b6016b76
MC
2857
2858 for (i = 0; i < last; i++) {
2859 sw_cons = NEXT_TX_BD(sw_cons);
e95524a7 2860
36227e88 2861 dma_unmap_page(&bp->pdev->dev,
1a4ccc2d 2862 dma_unmap_addr(
e95524a7
AD
2863 &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
2864 mapping),
9e903e08 2865 skb_frag_size(&skb_shinfo(skb)->frags[i]),
e95524a7 2866 PCI_DMA_TODEVICE);
b6016b76
MC
2867 }
2868
2869 sw_cons = NEXT_TX_BD(sw_cons);
2870
745720e5 2871 dev_kfree_skb(skb);
57851d84
MC
2872 tx_pkt++;
2873 if (tx_pkt == budget)
2874 break;
b6016b76 2875
d62fda08
ED
2876 if (hw_cons == sw_cons)
2877 hw_cons = bnx2_get_hw_tx_cons(bnapi);
b6016b76
MC
2878 }
2879
35e9010b
MC
2880 txr->hw_tx_cons = hw_cons;
2881 txr->tx_cons = sw_cons;
706bf240 2882
2f8af120 2883 /* Need to make the tx_cons update visible to bnx2_start_xmit()
706bf240 2884 * before checking for netif_tx_queue_stopped(). Without the
2f8af120
MC
2885 * memory barrier, there is a small possibility that bnx2_start_xmit()
2886 * will miss it and cause the queue to be stopped forever.
2887 */
2888 smp_mb();
b6016b76 2889
706bf240 2890 if (unlikely(netif_tx_queue_stopped(txq)) &&
35e9010b 2891 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
706bf240
BL
2892 __netif_tx_lock(txq, smp_processor_id());
2893 if ((netif_tx_queue_stopped(txq)) &&
35e9010b 2894 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
706bf240
BL
2895 netif_tx_wake_queue(txq);
2896 __netif_tx_unlock(txq);
b6016b76 2897 }
706bf240 2898
57851d84 2899 return tx_pkt;
b6016b76
MC
2900}
2901
1db82f2a 2902static void
bb4f98ab 2903bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
a1f60190 2904 struct sk_buff *skb, int count)
1db82f2a
MC
2905{
2906 struct sw_pg *cons_rx_pg, *prod_rx_pg;
2907 struct rx_bd *cons_bd, *prod_bd;
1db82f2a 2908 int i;
3d16af86 2909 u16 hw_prod, prod;
bb4f98ab 2910 u16 cons = rxr->rx_pg_cons;
1db82f2a 2911
3d16af86
BL
2912 cons_rx_pg = &rxr->rx_pg_ring[cons];
2913
2914 /* The caller was unable to allocate a new page to replace the
2915 * last one in the frags array, so we need to recycle that page
2916 * and then free the skb.
2917 */
2918 if (skb) {
2919 struct page *page;
2920 struct skb_shared_info *shinfo;
2921
2922 shinfo = skb_shinfo(skb);
2923 shinfo->nr_frags--;
b7b6a688
IC
2924 page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
2925 __skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);
3d16af86
BL
2926
2927 cons_rx_pg->page = page;
2928 dev_kfree_skb(skb);
2929 }
2930
2931 hw_prod = rxr->rx_pg_prod;
2932
1db82f2a
MC
2933 for (i = 0; i < count; i++) {
2934 prod = RX_PG_RING_IDX(hw_prod);
2935
bb4f98ab
MC
2936 prod_rx_pg = &rxr->rx_pg_ring[prod];
2937 cons_rx_pg = &rxr->rx_pg_ring[cons];
2938 cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2939 prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1db82f2a 2940
1db82f2a
MC
2941 if (prod != cons) {
2942 prod_rx_pg->page = cons_rx_pg->page;
2943 cons_rx_pg->page = NULL;
1a4ccc2d
FT
2944 dma_unmap_addr_set(prod_rx_pg, mapping,
2945 dma_unmap_addr(cons_rx_pg, mapping));
1db82f2a
MC
2946
2947 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2948 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2949
2950 }
2951 cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2952 hw_prod = NEXT_RX_BD(hw_prod);
2953 }
bb4f98ab
MC
2954 rxr->rx_pg_prod = hw_prod;
2955 rxr->rx_pg_cons = cons;
1db82f2a
MC
2956}
2957
b6016b76 2958static inline void
bb4f98ab
MC
2959bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2960 struct sk_buff *skb, u16 cons, u16 prod)
b6016b76 2961{
236b6394
MC
2962 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2963 struct rx_bd *cons_bd, *prod_bd;
2964
bb4f98ab
MC
2965 cons_rx_buf = &rxr->rx_buf_ring[cons];
2966 prod_rx_buf = &rxr->rx_buf_ring[prod];
b6016b76 2967
36227e88 2968 dma_sync_single_for_device(&bp->pdev->dev,
1a4ccc2d 2969 dma_unmap_addr(cons_rx_buf, mapping),
601d3d18 2970 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
b6016b76 2971
bb4f98ab 2972 rxr->rx_prod_bseq += bp->rx_buf_use_size;
b6016b76 2973
236b6394 2974 prod_rx_buf->skb = skb;
a33fa66b 2975 prod_rx_buf->desc = (struct l2_fhdr *) skb->data;
b6016b76 2976
236b6394
MC
2977 if (cons == prod)
2978 return;
b6016b76 2979
1a4ccc2d
FT
2980 dma_unmap_addr_set(prod_rx_buf, mapping,
2981 dma_unmap_addr(cons_rx_buf, mapping));
236b6394 2982
bb4f98ab
MC
2983 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2984 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
236b6394
MC
2985 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2986 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
b6016b76
MC
2987}
2988
85833c62 2989static int
bb4f98ab 2990bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
a1f60190
MC
2991 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2992 u32 ring_idx)
85833c62
MC
2993{
2994 int err;
2995 u16 prod = ring_idx & 0xffff;
2996
a2df00aa 2997 err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_ATOMIC);
85833c62 2998 if (unlikely(err)) {
bb4f98ab 2999 bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
1db82f2a
MC
3000 if (hdr_len) {
3001 unsigned int raw_len = len + 4;
3002 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
3003
bb4f98ab 3004 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
1db82f2a 3005 }
85833c62
MC
3006 return err;
3007 }
3008
d89cb6af 3009 skb_reserve(skb, BNX2_RX_OFFSET);
36227e88 3010 dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
85833c62
MC
3011 PCI_DMA_FROMDEVICE);
3012
1db82f2a
MC
3013 if (hdr_len == 0) {
3014 skb_put(skb, len);
3015 return 0;
3016 } else {
3017 unsigned int i, frag_len, frag_size, pages;
3018 struct sw_pg *rx_pg;
bb4f98ab
MC
3019 u16 pg_cons = rxr->rx_pg_cons;
3020 u16 pg_prod = rxr->rx_pg_prod;
1db82f2a
MC
3021
3022 frag_size = len + 4 - hdr_len;
3023 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
3024 skb_put(skb, hdr_len);
3025
3026 for (i = 0; i < pages; i++) {
3d16af86
BL
3027 dma_addr_t mapping_old;
3028
1db82f2a
MC
3029 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
3030 if (unlikely(frag_len <= 4)) {
3031 unsigned int tail = 4 - frag_len;
3032
bb4f98ab
MC
3033 rxr->rx_pg_cons = pg_cons;
3034 rxr->rx_pg_prod = pg_prod;
3035 bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
a1f60190 3036 pages - i);
1db82f2a
MC
3037 skb->len -= tail;
3038 if (i == 0) {
3039 skb->tail -= tail;
3040 } else {
3041 skb_frag_t *frag =
3042 &skb_shinfo(skb)->frags[i - 1];
9e903e08 3043 skb_frag_size_sub(frag, tail);
1db82f2a 3044 skb->data_len -= tail;
1db82f2a
MC
3045 }
3046 return 0;
3047 }
bb4f98ab 3048 rx_pg = &rxr->rx_pg_ring[pg_cons];
1db82f2a 3049
3d16af86
BL
3050 /* Don't unmap yet. If we're unable to allocate a new
3051 * page, we need to recycle the page and the DMA addr.
3052 */
1a4ccc2d 3053 mapping_old = dma_unmap_addr(rx_pg, mapping);
1db82f2a
MC
3054 if (i == pages - 1)
3055 frag_len -= 4;
3056
3057 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3058 rx_pg->page = NULL;
3059
bb4f98ab 3060 err = bnx2_alloc_rx_page(bp, rxr,
a2df00aa
SG
3061 RX_PG_RING_IDX(pg_prod),
3062 GFP_ATOMIC);
1db82f2a 3063 if (unlikely(err)) {
bb4f98ab
MC
3064 rxr->rx_pg_cons = pg_cons;
3065 rxr->rx_pg_prod = pg_prod;
3066 bnx2_reuse_rx_skb_pages(bp, rxr, skb,
a1f60190 3067 pages - i);
1db82f2a
MC
3068 return err;
3069 }
3070
36227e88 3071 dma_unmap_page(&bp->pdev->dev, mapping_old,
3d16af86
BL
3072 PAGE_SIZE, PCI_DMA_FROMDEVICE);
3073
1db82f2a
MC
3074 frag_size -= frag_len;
3075 skb->data_len += frag_len;
a1f4e8bc 3076 skb->truesize += PAGE_SIZE;
1db82f2a
MC
3077 skb->len += frag_len;
3078
3079 pg_prod = NEXT_RX_BD(pg_prod);
3080 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
3081 }
bb4f98ab
MC
3082 rxr->rx_pg_prod = pg_prod;
3083 rxr->rx_pg_cons = pg_cons;
1db82f2a 3084 }
85833c62
MC
3085 return 0;
3086}
3087
c09c2627 3088static inline u16
35efa7c1 3089bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
c09c2627 3090{
bb4f98ab
MC
3091 u16 cons;
3092
43e80b89
MC
3093 /* Tell compiler that status block fields can change. */
3094 barrier();
3095 cons = *bnapi->hw_rx_cons_ptr;
581daf7e 3096 barrier();
c09c2627
MC
3097 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
3098 cons++;
3099 return cons;
3100}
3101
b6016b76 3102static int
35efa7c1 3103bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
b6016b76 3104{
bb4f98ab 3105 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
b6016b76
MC
3106 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3107 struct l2_fhdr *rx_hdr;
1db82f2a 3108 int rx_pkt = 0, pg_ring_used = 0;
b6016b76 3109
35efa7c1 3110 hw_cons = bnx2_get_hw_rx_cons(bnapi);
bb4f98ab
MC
3111 sw_cons = rxr->rx_cons;
3112 sw_prod = rxr->rx_prod;
b6016b76
MC
3113
3114 /* Memory barrier necessary as speculative reads of the rx
3115 * buffer can be ahead of the index in the status block
3116 */
3117 rmb();
3118 while (sw_cons != hw_cons) {
1db82f2a 3119 unsigned int len, hdr_len;
ade2bfe7 3120 u32 status;
a33fa66b 3121 struct sw_bd *rx_buf, *next_rx_buf;
b6016b76 3122 struct sk_buff *skb;
236b6394 3123 dma_addr_t dma_addr;
b6016b76
MC
3124
3125 sw_ring_cons = RX_RING_IDX(sw_cons);
3126 sw_ring_prod = RX_RING_IDX(sw_prod);
3127
bb4f98ab 3128 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
b6016b76 3129 skb = rx_buf->skb;
a33fa66b 3130 prefetchw(skb);
236b6394 3131
aabef8b2
FT
3132 next_rx_buf =
3133 &rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
3134 prefetch(next_rx_buf->desc);
3135
236b6394
MC
3136 rx_buf->skb = NULL;
3137
1a4ccc2d 3138 dma_addr = dma_unmap_addr(rx_buf, mapping);
236b6394 3139
36227e88 3140 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
601d3d18
BL
3141 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3142 PCI_DMA_FROMDEVICE);
b6016b76 3143
a33fa66b 3144 rx_hdr = rx_buf->desc;
1db82f2a 3145 len = rx_hdr->l2_fhdr_pkt_len;
990ec380 3146 status = rx_hdr->l2_fhdr_status;
b6016b76 3147
1db82f2a
MC
3148 hdr_len = 0;
3149 if (status & L2_FHDR_STATUS_SPLIT) {
3150 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3151 pg_ring_used = 1;
3152 } else if (len > bp->rx_jumbo_thresh) {
3153 hdr_len = bp->rx_jumbo_thresh;
3154 pg_ring_used = 1;
3155 }
3156
990ec380
MC
3157 if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3158 L2_FHDR_ERRORS_PHY_DECODE |
3159 L2_FHDR_ERRORS_ALIGNMENT |
3160 L2_FHDR_ERRORS_TOO_SHORT |
3161 L2_FHDR_ERRORS_GIANT_FRAME))) {
3162
3163 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
3164 sw_ring_prod);
3165 if (pg_ring_used) {
3166 int pages;
3167
3168 pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3169
3170 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3171 }
3172 goto next_rx;
3173 }
3174
1db82f2a 3175 len -= 4;
b6016b76 3176
5d5d0015 3177 if (len <= bp->rx_copy_thresh) {
b6016b76
MC
3178 struct sk_buff *new_skb;
3179
f22828e8 3180 new_skb = netdev_alloc_skb(bp->dev, len + 6);
85833c62 3181 if (new_skb == NULL) {
bb4f98ab 3182 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
85833c62
MC
3183 sw_ring_prod);
3184 goto next_rx;
3185 }
b6016b76
MC
3186
3187 /* aligned copy */
d89cb6af 3188 skb_copy_from_linear_data_offset(skb,
f22828e8
MC
3189 BNX2_RX_OFFSET - 6,
3190 new_skb->data, len + 6);
3191 skb_reserve(new_skb, 6);
b6016b76 3192 skb_put(new_skb, len);
b6016b76 3193
bb4f98ab 3194 bnx2_reuse_rx_skb(bp, rxr, skb,
b6016b76
MC
3195 sw_ring_cons, sw_ring_prod);
3196
3197 skb = new_skb;
bb4f98ab 3198 } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
a1f60190 3199 dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
b6016b76 3200 goto next_rx;
b6016b76 3201
f22828e8 3202 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
7d0fd211
JG
3203 !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
3204 __vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag);
f22828e8 3205
b6016b76
MC
3206 skb->protocol = eth_type_trans(skb, bp->dev);
3207
3208 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
d1e100ba 3209 (ntohs(skb->protocol) != 0x8100)) {
b6016b76 3210
745720e5 3211 dev_kfree_skb(skb);
b6016b76
MC
3212 goto next_rx;
3213
3214 }
3215
bc8acf2c 3216 skb_checksum_none_assert(skb);
8d7dfc2b 3217 if ((bp->dev->features & NETIF_F_RXCSUM) &&
b6016b76
MC
3218 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3219 L2_FHDR_STATUS_UDP_DATAGRAM))) {
3220
ade2bfe7
MC
3221 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3222 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
b6016b76
MC
3223 skb->ip_summed = CHECKSUM_UNNECESSARY;
3224 }
fdc8541d
MC
3225 if ((bp->dev->features & NETIF_F_RXHASH) &&
3226 ((status & L2_FHDR_STATUS_USE_RXHASH) ==
3227 L2_FHDR_STATUS_USE_RXHASH))
3228 skb->rxhash = rx_hdr->l2_fhdr_hash;
b6016b76 3229
0c8dfc83 3230 skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
7d0fd211 3231 napi_gro_receive(&bnapi->napi, skb);
b6016b76
MC
3232 rx_pkt++;
3233
3234next_rx:
b6016b76
MC
3235 sw_cons = NEXT_RX_BD(sw_cons);
3236 sw_prod = NEXT_RX_BD(sw_prod);
3237
3238 if ((rx_pkt == budget))
3239 break;
f4e418f7
MC
3240
3241 /* Refresh hw_cons to see if there is new work */
3242 if (sw_cons == hw_cons) {
35efa7c1 3243 hw_cons = bnx2_get_hw_rx_cons(bnapi);
f4e418f7
MC
3244 rmb();
3245 }
b6016b76 3246 }
bb4f98ab
MC
3247 rxr->rx_cons = sw_cons;
3248 rxr->rx_prod = sw_prod;
b6016b76 3249
1db82f2a 3250 if (pg_ring_used)
bb4f98ab 3251 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
1db82f2a 3252
bb4f98ab 3253 REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
b6016b76 3254
bb4f98ab 3255 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
b6016b76
MC
3256
3257 mmiowb();
3258
3259 return rx_pkt;
3260
3261}
3262
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);
	/* Ack and mask the interrupt; NAPI completion re-enables it. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3285
8e6a72c4
MC
/* MSI ISR for one-shot mode.  Unlike bnx2_msi(), no ack/mask register
 * write is issued here (presumably the hardware self-masks in one-shot
 * mode, as the name suggests — confirm against the chip manual).
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3302
b6016b76 3303static irqreturn_t
7d12e780 3304bnx2_interrupt(int irq, void *dev_instance)
b6016b76 3305{
f0ea2e63
MC
3306 struct bnx2_napi *bnapi = dev_instance;
3307 struct bnx2 *bp = bnapi->bp;
43e80b89 3308 struct status_block *sblk = bnapi->status_blk.msi;
b6016b76
MC
3309
3310 /* When using INTx, it is possible for the interrupt to arrive
3311 * at the CPU before the status block posted prior to the
3312 * interrupt. Reading a register will flush the status block.
3313 * When using MSI, the MSI message will always complete after
3314 * the status block write.
3315 */
35efa7c1 3316 if ((sblk->status_idx == bnapi->last_status_idx) &&
b6016b76
MC
3317 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3318 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
73eef4cd 3319 return IRQ_NONE;
b6016b76
MC
3320
3321 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3322 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3323 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3324
b8a7ce7b
MC
3325 /* Read back to deassert IRQ immediately to avoid too many
3326 * spurious interrupts.
3327 */
3328 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3329
b6016b76 3330 /* Return here if interrupt is shared and is disabled. */
73eef4cd
MC
3331 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3332 return IRQ_HANDLED;
b6016b76 3333
288379f0 3334 if (napi_schedule_prep(&bnapi->napi)) {
35efa7c1 3335 bnapi->last_status_idx = sblk->status_idx;
288379f0 3336 __napi_schedule(&bnapi->napi);
b8a7ce7b 3337 }
b6016b76 3338
73eef4cd 3339 return IRQ_HANDLED;
b6016b76
MC
3340}
3341
f4e418f7 3342static inline int
43e80b89 3343bnx2_has_fast_work(struct bnx2_napi *bnapi)
f4e418f7 3344{
35e9010b 3345 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
bb4f98ab 3346 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
f4e418f7 3347
bb4f98ab 3348 if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
35e9010b 3349 (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
f4e418f7 3350 return 1;
43e80b89
MC
3351 return 0;
3352}
3353
3354#define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3355 STATUS_ATTN_BITS_TIMER_ABORT)
3356
3357static inline int
3358bnx2_has_work(struct bnx2_napi *bnapi)
3359{
3360 struct status_block *sblk = bnapi->status_blk.msi;
3361
3362 if (bnx2_has_fast_work(bnapi))
3363 return 1;
f4e418f7 3364
4edd473f
MC
3365#ifdef BCM_CNIC
3366 if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3367 return 1;
3368#endif
3369
da3e4fbe
MC
3370 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3371 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
f4e418f7
MC
3372 return 1;
3373
3374 return 0;
3375}
3376
efba0180
MC
/* Workaround for lost MSI interrupts: if work is pending but the
 * status index has not moved since the previous idle check, toggle the
 * MSI enable bit and invoke the MSI handler by hand.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			/* Bounce MSI off and back on, then service the
			 * interrupt directly.
			 */
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	/* Remember where we were for the next idle check. */
	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3398
4edd473f
MC
#ifdef BCM_CNIC
/* Hand the status block to the registered CNIC driver, if any, and
 * record the tag it returns.
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct cnic_ops *ops;

	if (!bnapi->cnic_present)
		return;

	rcu_read_lock();
	ops = rcu_dereference(bp->cnic_ops);
	if (ops)
		bnapi->cnic_tag = ops->cnic_handler(bp->cnic_data,
						    bnapi->status_blk.msi);
	rcu_read_unlock();
}
#endif
3415
43e80b89 3416static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
b6016b76 3417{
43e80b89 3418 struct status_block *sblk = bnapi->status_blk.msi;
da3e4fbe
MC
3419 u32 status_attn_bits = sblk->status_attn_bits;
3420 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
b6016b76 3421
da3e4fbe
MC
3422 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3423 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
b6016b76 3424
35efa7c1 3425 bnx2_phy_int(bp, bnapi);
bf5295bb
MC
3426
3427 /* This is needed to take care of transient status
3428 * during link changes.
3429 */
3430 REG_WR(bp, BNX2_HC_COMMAND,
3431 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3432 REG_RD(bp, BNX2_HC_COMMAND);
b6016b76 3433 }
43e80b89
MC
3434}
3435
3436static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3437 int work_done, int budget)
3438{
3439 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3440 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
b6016b76 3441
35e9010b 3442 if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
57851d84 3443 bnx2_tx_int(bp, bnapi, 0);
b6016b76 3444
bb4f98ab 3445 if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
35efa7c1 3446 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
6aa20a22 3447
6f535763
DM
3448 return work_done;
3449}
3450
f0ea2e63
MC
/* NAPI poll handler for MSI-X vectors: fast-path (TX/RX) work only;
 * link and CNIC events are handled by bnx2_poll() on vector 0.
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			napi_complete(napi);
			/* Tell the chip the last status index processed
			 * for this vector.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3477
6f535763
DM
/* Main NAPI poll handler (INTx/MSI and MSI-X vector 0): services link
 * events, fast-path work and CNIC, then re-enables interrupts when all
 * work is done.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
		bnx2_poll_cnic(bp, bnapi);
#endif

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete(napi);
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: first write keeps the interrupt masked
			 * while updating the index, second write (without
			 * MASK_INT) re-enables it.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3526
932ff279 3527/* Called with rtnl_lock from vlan functions and also netif_tx_lock
b6016b76
MC
3528 * from set_multicast.
3529 */
3530static void
3531bnx2_set_rx_mode(struct net_device *dev)
3532{
972ec0d4 3533 struct bnx2 *bp = netdev_priv(dev);
b6016b76 3534 u32 rx_mode, sort_mode;
ccffad25 3535 struct netdev_hw_addr *ha;
b6016b76 3536 int i;
b6016b76 3537
9f52b564
MC
3538 if (!netif_running(dev))
3539 return;
3540
c770a65c 3541 spin_lock_bh(&bp->phy_lock);
b6016b76
MC
3542
3543 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3544 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3545 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
7d0fd211
JG
3546 if (!(dev->features & NETIF_F_HW_VLAN_RX) &&
3547 (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
b6016b76 3548 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
b6016b76
MC
3549 if (dev->flags & IFF_PROMISC) {
3550 /* Promiscuous mode. */
3551 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
7510873d
MC
3552 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3553 BNX2_RPM_SORT_USER0_PROM_VLAN;
b6016b76
MC
3554 }
3555 else if (dev->flags & IFF_ALLMULTI) {
3556 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3557 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3558 0xffffffff);
3559 }
3560 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3561 }
3562 else {
3563 /* Accept one or more multicast(s). */
b6016b76
MC
3564 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3565 u32 regidx;
3566 u32 bit;
3567 u32 crc;
3568
3569 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3570
22bedad3
JP
3571 netdev_for_each_mc_addr(ha, dev) {
3572 crc = ether_crc_le(ETH_ALEN, ha->addr);
b6016b76
MC
3573 bit = crc & 0xff;
3574 regidx = (bit & 0xe0) >> 5;
3575 bit &= 0x1f;
3576 mc_filter[regidx] |= (1 << bit);
3577 }
3578
3579 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3580 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3581 mc_filter[i]);
3582 }
3583
3584 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3585 }
3586
32e7bfc4 3587 if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
5fcaed01
BL
3588 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3589 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3590 BNX2_RPM_SORT_USER0_PROM_VLAN;
3591 } else if (!(dev->flags & IFF_PROMISC)) {
5fcaed01 3592 /* Add all entries into to the match filter list */
ccffad25 3593 i = 0;
32e7bfc4 3594 netdev_for_each_uc_addr(ha, dev) {
ccffad25 3595 bnx2_set_mac_addr(bp, ha->addr,
5fcaed01
BL
3596 i + BNX2_START_UNICAST_ADDRESS_INDEX);
3597 sort_mode |= (1 <<
3598 (i + BNX2_START_UNICAST_ADDRESS_INDEX));
ccffad25 3599 i++;
5fcaed01
BL
3600 }
3601
3602 }
3603
b6016b76
MC
3604 if (rx_mode != bp->rx_mode) {
3605 bp->rx_mode = rx_mode;
3606 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3607 }
3608
3609 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3610 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3611 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3612
c770a65c 3613 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
3614}
3615
7880b72e 3616static int
57579f76
MC
3617check_fw_section(const struct firmware *fw,
3618 const struct bnx2_fw_file_section *section,
3619 u32 alignment, bool non_empty)
3620{
3621 u32 offset = be32_to_cpu(section->offset);
3622 u32 len = be32_to_cpu(section->len);
3623
3624 if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3625 return -EINVAL;
3626 if ((non_empty && len == 0) || len > fw->size - offset ||
3627 len & (alignment - 1))
3628 return -EINVAL;
3629 return 0;
3630}
3631
7880b72e 3632static int
57579f76
MC
3633check_mips_fw_entry(const struct firmware *fw,
3634 const struct bnx2_mips_fw_file_entry *entry)
3635{
3636 if (check_fw_section(fw, &entry->text, 4, true) ||
3637 check_fw_section(fw, &entry->data, 4, false) ||
3638 check_fw_section(fw, &entry->rodata, 4, false))
3639 return -EINVAL;
3640 return 0;
3641}
3642
7880b72e 3643static void bnx2_release_firmware(struct bnx2 *bp)
3644{
3645 if (bp->rv2p_firmware) {
3646 release_firmware(bp->mips_firmware);
3647 release_firmware(bp->rv2p_firmware);
3648 bp->rv2p_firmware = NULL;
3649 }
3650}
3651
/* Fetch the MIPS and RV2P firmware images for this chip revision and
 * validate their section tables.  On success both images are cached in
 * bp; on failure everything is released and an errno is returned.
 */
static int bnx2_request_uncached_firmware(struct bnx2 *bp)
{
	const char *mips_fw_file, *rv2p_fw_file;
	const struct bnx2_mips_fw_file *mips_fw;
	const struct bnx2_rv2p_fw_file *rv2p_fw;
	int rc;

	/* Select the firmware files matching the chip revision. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		mips_fw_file = FW_MIPS_FILE_09;
		if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5709_A1))
			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
		else
			rv2p_fw_file = FW_RV2P_FILE_09;
	} else {
		mips_fw_file = FW_MIPS_FILE_06;
		rv2p_fw_file = FW_RV2P_FILE_06;
	}

	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
	if (rc) {
		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
		goto out;
	}

	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
	if (rc) {
		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
		goto err_release_mips_firmware;
	}
	/* Sanity-check every section before anything is programmed. */
	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
		rc = -EINVAL;
		goto err_release_firmware;
	}
	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
		rc = -EINVAL;
		goto err_release_firmware;
	}
out:
	return rc;

err_release_firmware:
	release_firmware(bp->rv2p_firmware);
	bp->rv2p_firmware = NULL;
err_release_mips_firmware:
	release_firmware(bp->mips_firmware);
	goto out;
}
3711
3712static int bnx2_request_firmware(struct bnx2 *bp)
3713{
3714 return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
57579f76
MC
3715}
3716
3717static u32
3718rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3719{
3720 switch (idx) {
3721 case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3722 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3723 rv2p_code |= RV2P_BD_PAGE_SIZE;
3724 break;
3725 }
3726 return rv2p_code;
3727}
3728
/* Download one RV2P processor's microcode, apply its fixup table, then
 * put the processor in reset (it is un-stalled later).  Always
 * returns 0.
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	/* Each processor has its own address/command register. */
	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	/* Instructions are 8 bytes: write high word, low word, then the
	 * instruction index together with the write command.
	 */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		val = (i / 8) | cmd;
		REG_WR(bp, addr, val);
	}

	/* Re-write the instructions named in the fixup table, patching
	 * each low word via rv2p_fw_fixup() on the way in.
	 */
	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			val = (loc / 2) | cmd;
			REG_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}
3788
af3ee519 3789static int
57579f76
MC
3790load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3791 const struct bnx2_mips_fw_file_entry *fw_entry)
b6016b76 3792{
57579f76
MC
3793 u32 addr, len, file_offset;
3794 __be32 *data;
b6016b76
MC
3795 u32 offset;
3796 u32 val;
3797
3798 /* Halt the CPU. */
2726d6e1 3799 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
b6016b76 3800 val |= cpu_reg->mode_value_halt;
2726d6e1
MC
3801 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3802 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
b6016b76
MC
3803
3804 /* Load the Text area. */
57579f76
MC
3805 addr = be32_to_cpu(fw_entry->text.addr);
3806 len = be32_to_cpu(fw_entry->text.len);
3807 file_offset = be32_to_cpu(fw_entry->text.offset);
3808 data = (__be32 *)(bp->mips_firmware->data + file_offset);
ea1f8d5c 3809
57579f76
MC
3810 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3811 if (len) {
b6016b76
MC
3812 int j;
3813
57579f76
MC
3814 for (j = 0; j < (len / 4); j++, offset += 4)
3815 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
b6016b76
MC
3816 }
3817
57579f76
MC
3818 /* Load the Data area. */
3819 addr = be32_to_cpu(fw_entry->data.addr);
3820 len = be32_to_cpu(fw_entry->data.len);
3821 file_offset = be32_to_cpu(fw_entry->data.offset);
3822 data = (__be32 *)(bp->mips_firmware->data + file_offset);
b6016b76 3823
57579f76
MC
3824 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3825 if (len) {
b6016b76
MC
3826 int j;
3827
57579f76
MC
3828 for (j = 0; j < (len / 4); j++, offset += 4)
3829 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
b6016b76
MC
3830 }
3831
3832 /* Load the Read-Only area. */
57579f76
MC
3833 addr = be32_to_cpu(fw_entry->rodata.addr);
3834 len = be32_to_cpu(fw_entry->rodata.len);
3835 file_offset = be32_to_cpu(fw_entry->rodata.offset);
3836 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3837
3838 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3839 if (len) {
b6016b76
MC
3840 int j;
3841
57579f76
MC
3842 for (j = 0; j < (len / 4); j++, offset += 4)
3843 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
b6016b76
MC
3844 }
3845
3846 /* Clear the pre-fetch instruction. */
2726d6e1 3847 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
57579f76
MC
3848
3849 val = be32_to_cpu(fw_entry->start_addr);
3850 bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
b6016b76
MC
3851
3852 /* Start the CPU. */
2726d6e1 3853 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
b6016b76 3854 val &= ~cpu_reg->mode_value_halt;
2726d6e1
MC
3855 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3856 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
af3ee519
MC
3857
3858 return 0;
b6016b76
MC
3859}
3860
fba9fe91 3861static int
b6016b76
MC
3862bnx2_init_cpus(struct bnx2 *bp)
3863{
57579f76
MC
3864 const struct bnx2_mips_fw_file *mips_fw =
3865 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3866 const struct bnx2_rv2p_fw_file *rv2p_fw =
3867 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3868 int rc;
b6016b76
MC
3869
3870 /* Initialize the RV2P processor. */
57579f76
MC
3871 load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3872 load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
b6016b76
MC
3873
3874 /* Initialize the RX Processor. */
57579f76 3875 rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
fba9fe91
MC
3876 if (rc)
3877 goto init_cpu_err;
3878
b6016b76 3879 /* Initialize the TX Processor. */
57579f76 3880 rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
fba9fe91
MC
3881 if (rc)
3882 goto init_cpu_err;
3883
b6016b76 3884 /* Initialize the TX Patch-up Processor. */
57579f76 3885 rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
fba9fe91
MC
3886 if (rc)
3887 goto init_cpu_err;
3888
b6016b76 3889 /* Initialize the Completion Processor. */
57579f76 3890 rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
fba9fe91
MC
3891 if (rc)
3892 goto init_cpu_err;
3893
d43584c8 3894 /* Initialize the Command Processor. */
57579f76 3895 rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
b6016b76 3896
fba9fe91 3897init_cpu_err:
fba9fe91 3898 return rc;
b6016b76
MC
3899}
3900
/* Transition the chip between D0 and D3hot.
 *
 * Entering D3hot with WOL enabled leaves the MAC/RPM configured to
 * receive magic/ACPI wake packets (copper links are renegotiated to
 * 10/100 first); without WOL everything is simply powered down.
 * Returns 0 on success, -EINVAL for unsupported states.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the power-state bits and any pending PME. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Undo the WOL packet-receive configuration. */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Save the user's link settings so they can be
			 * restored after the WOL renegotiation.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Sort rules: clear, set, then enable. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the bootcode we are suspending. */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
				     1, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			/* 5706 A0/A1: only set the D3hot state bits when
			 * WOL is enabled.
			 */
			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
4038
4039static int
4040bnx2_acquire_nvram_lock(struct bnx2 *bp)
4041{
4042 u32 val;
4043 int j;
4044
4045 /* Request access to the flash interface. */
4046 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4047 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4048 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4049 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4050 break;
4051
4052 udelay(5);
4053 }
4054
4055 if (j >= NVRAM_TIMEOUT_COUNT)
4056 return -EBUSY;
4057
4058 return 0;
4059}
4060
4061static int
4062bnx2_release_nvram_lock(struct bnx2 *bp)
4063{
4064 int j;
4065 u32 val;
4066
4067 /* Relinquish nvram interface. */
4068 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4069
4070 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4071 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4072 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4073 break;
4074
4075 udelay(5);
4076 }
4077
4078 if (j >= NVRAM_TIMEOUT_COUNT)
4079 return -EBUSY;
4080
4081 return 0;
4082}
4083
4084
/* Lift write protection on the NVRAM interface.  Always clears the PCI
 * write-protect bit; parts flagged BNX2_NV_WREN additionally need an
 * explicit WREN command, which is issued and polled to completion.
 * Returns 0 on success or -EBUSY if the WREN command times out.
 */
static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

	if (bp->flash_info->flags & BNX2_NV_WREN) {
		int j;

		/* DONE is cleared by a separate write before the command. */
		REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
		REG_WR(bp, BNX2_NVM_COMMAND,
		       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			udelay(5);

			val = REG_RD(bp, BNX2_NVM_COMMAND);
			if (val & BNX2_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT)
			return -EBUSY;
	}
	return 0;
}
4113
4114static void
4115bnx2_disable_nvram_write(struct bnx2 *bp)
4116{
4117 u32 val;
4118
4119 val = REG_RD(bp, BNX2_MISC_CFG);
4120 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4121}
4122
4123
4124static void
4125bnx2_enable_nvram_access(struct bnx2 *bp)
4126{
4127 u32 val;
4128
4129 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4130 /* Enable both bits, even on read. */
6aa20a22 4131 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
4132 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4133}
4134
4135static void
4136bnx2_disable_nvram_access(struct bnx2 *bp)
4137{
4138 u32 val;
4139
4140 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4141 /* Disable both bits, even after read. */
6aa20a22 4142 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
4143 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4144 BNX2_NVM_ACCESS_ENABLE_WR_EN));
4145}
4146
/* Erase the flash page containing @offset.  Buffered (EEPROM-style)
 * parts need no erase and return immediately.  Returns 0 on success or
 * -EBUSY if the erase command does not complete within the poll budget.
 * Caller must hold the NVRAM lock with write access enabled.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to erase. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4186
/* Read one 32-bit word from NVRAM at @offset into @ret_val (stored
 * big-endian, i.e. in flash byte order).  @cmd_flags carries the
 * FIRST/LAST framing bits for multi-word transactions.  Returns 0 or
 * -EBUSY on command timeout.  Caller must hold the NVRAM lock with
 * access enabled.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		/* Linear offset -> (page number << page_bits) + byte-in-page */
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			__be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
			memcpy(ret_val, &v, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4230
4231
/* Write one 32-bit word (@val, 4 bytes in flash byte order) to NVRAM at
 * @offset.  @cmd_flags carries the FIRST/LAST framing bits for
 * multi-word transactions.  Returns 0 or -EBUSY on command timeout.
 * Caller must hold the NVRAM lock with write access enabled.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd;
	__be32 val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		/* Linear offset -> (page number << page_bits) + byte-in-page */
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Go through __be32 so the bytes land in flash order regardless
	 * of host endianness. */
	memcpy(&val32, val, 4);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4275
/* Identify the attached flash/EEPROM part and record it in
 * bp->flash_info, reprogramming the NVRAM interface configuration if
 * the strapping has not been applied yet.  On 5709 the fixed flash_5709
 * descriptor is used and only the size is probed.  The usable size is
 * taken from shared-memory config2 when present, else from the part's
 * table entry.  Returns 0 on success or -ENODEV for an unknown part.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	/* Bit 30 of NVM_CFG1: flash interface already reconfigured. */
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects the backup strapping field. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		bp->flash_info = NULL;
		pr_alert("Unknown flash/EEPROM type\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size advertised by firmware shared memory. */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4358
4359static int
4360bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4361 int buf_size)
4362{
4363 int rc = 0;
4364 u32 cmd_flags, offset32, len32, extra;
4365
4366 if (buf_size == 0)
4367 return 0;
4368
4369 /* Request access to the flash interface. */
4370 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4371 return rc;
4372
4373 /* Enable access to flash interface */
4374 bnx2_enable_nvram_access(bp);
4375
4376 len32 = buf_size;
4377 offset32 = offset;
4378 extra = 0;
4379
4380 cmd_flags = 0;
4381
4382 if (offset32 & 3) {
4383 u8 buf[4];
4384 u32 pre_len;
4385
4386 offset32 &= ~3;
4387 pre_len = 4 - (offset & 3);
4388
4389 if (pre_len >= len32) {
4390 pre_len = len32;
4391 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4392 BNX2_NVM_COMMAND_LAST;
4393 }
4394 else {
4395 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4396 }
4397
4398 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4399
4400 if (rc)
4401 return rc;
4402
4403 memcpy(ret_buf, buf + (offset & 3), pre_len);
4404
4405 offset32 += 4;
4406 ret_buf += pre_len;
4407 len32 -= pre_len;
4408 }
4409 if (len32 & 3) {
4410 extra = 4 - (len32 & 3);
4411 len32 = (len32 + 4) & ~3;
4412 }
4413
4414 if (len32 == 4) {
4415 u8 buf[4];
4416
4417 if (cmd_flags)
4418 cmd_flags = BNX2_NVM_COMMAND_LAST;
4419 else
4420 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4421 BNX2_NVM_COMMAND_LAST;
4422
4423 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4424
4425 memcpy(ret_buf, buf, 4 - extra);
4426 }
4427 else if (len32 > 0) {
4428 u8 buf[4];
4429
4430 /* Read the first word. */
4431 if (cmd_flags)
4432 cmd_flags = 0;
4433 else
4434 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4435
4436 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4437
4438 /* Advance to the next dword. */
4439 offset32 += 4;
4440 ret_buf += 4;
4441 len32 -= 4;
4442
4443 while (len32 > 4 && rc == 0) {
4444 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4445
4446 /* Advance to the next dword. */
4447 offset32 += 4;
4448 ret_buf += 4;
4449 len32 -= 4;
4450 }
4451
4452 if (rc)
4453 return rc;
4454
4455 cmd_flags = BNX2_NVM_COMMAND_LAST;
4456 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4457
4458 memcpy(ret_buf, buf, 4 - extra);
4459 }
4460
4461 /* Disable access to flash interface */
4462 bnx2_disable_nvram_access(bp);
4463
4464 bnx2_release_nvram_lock(bp);
4465
4466 return rc;
4467}
4468
/* Write an arbitrary byte range from @data_buf to NVRAM.
 *
 * Unaligned head/tail bytes are merged with the existing flash contents
 * by pre-reading the boundary dwords into @start/@end and building an
 * aligned shadow copy in @align_buf.  The write then proceeds page by
 * page; for non-buffered parts each page is read back, erased, and
 * rewritten (preserved bytes come from @flash_buffer, 264 bytes = max
 * page size used here).  The NVRAM lock is taken and dropped around
 * each page so firmware can interleave its own accesses.
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): error paths taken after bnx2_acquire_nvram_lock()
 * inside the per-page loop jump straight to nvram_write_end without
 * disabling NVRAM access or releasing the arbiter lock — verify whether
 * this leak is acceptable on these failure paths.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		/* Preserve the bytes before the requested range. */
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		/* Preserve the bytes after the requested range. */
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4648
/* Negotiate optional capabilities with the bootcode via the shared
 * memory capability mailbox: VLAN-keep (unless ASF is enabled) and
 * remote-PHY on SerDes parts (which also sets bp->phy_port from the
 * current link status).  Each accepted capability is acknowledged back
 * to the firmware when the interface is running.
 */
static void
bnx2_init_fw_cap(struct bnx2 *bp)
{
	u32 val, sig = 0;

	/* Start from a clean slate; flags are re-derived below. */
	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;

	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;

	/* No valid capability signature -> firmware offers nothing. */
	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
		return;

	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
		u32 link;

		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;

		/* Derive the reported port type from current link state. */
		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
		if (link & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
	}

	if (netif_running(bp->dev) && sig)
		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
}
4688
b4b36042
MC
/* Switch the GRC window to separate-window mode and point windows 2 and
 * 3 at the chip-internal MSI-X table and PBA addresses so they are
 * reachable through the register BAR.
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4697
b6016b76
MC
/* Perform a coordinated chip reset.
 *
 * Sequence: quiesce DMA (chip-generation specific), handshake WAIT0
 * with the bootcode, deposit the soft-reset signature, issue the reset
 * (SW_RESET command on 5709, CORE_RST_REQ on 5706/5708 with errata
 * delays for 5706 A0/A1), verify endian configuration, then handshake
 * WAIT1 and re-run capability negotiation.  @reset_code is the driver
 * message passed to the firmware.  Returns 0 or a negative errno.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
		       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
		       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
		       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
		       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
		val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
		udelay(5);
	} else {  /* 5709 */
		/* Disable DMA and poll until no transactions are pending. */
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);

		for (i = 0; i < 100; i++) {
			msleep(1);
			val = REG_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
			if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
				break;
		}
	}

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			pr_err("Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		pr_err("Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	/* Re-negotiate capabilities; remote-PHY may move the port type. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		bnx2_setup_msix_tbl(bp);
		/* Prevent MSIX table reads and write from timing out */
		REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
		       BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
	}

	return rc;
}
4822
/* Bring the chip from post-reset state to operational: program DMA
 * byte/word swapping and channel counts, contexts, on-chip CPUs, MAC
 * address and backoff seed, MTU and RX buffer sizing, status/statistics
 * block DMA addresses, host-coalescing parameters (per-vector for
 * MSI-X), then hand the RESET completion message to the firmware and
 * enable the engine.  Returns 0 or a negative errno from a sub-step.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val, mtu;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		/* Clear the relaxed-ordering enable in the PCI-X command. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
		if (CHIP_REV(bp) == CHIP_REV_Ax)
			val |= BNX2_MQ_CONFIG_HALT_DIS;
	}

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the backoff timer from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	mtu = bp->dev->mtu;
	val = mtu + ETH_HLEN + ETH_FCS_LEN;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	/* RBUF sizing is always computed for at least a 1500-byte MTU. */
	if (mtu < 1500)
		mtu = 1500;

	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));

	/* Clear all status blocks and per-vector bookkeeping. */
	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->idle_chk_status_idx = 0xffff;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host coalescing thresholds for the default vector. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
		       BNX2_HC_MSIX_BIT_VECTOR_VAL);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Tell firmware whether we favor low-latency RX coalescing. */
	if (bp->rx_ticks < 25)
		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
	else
		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);

	/* Per-vector status block configuration for additional vectors. */
	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		REG_WR(bp, base,
		       BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
		       BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
		       BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
		       (bp->tx_quick_cons_trip_int << 16) |
		       bp->tx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
		       (bp->tx_ticks_int << 16) | bp->tx_ticks);

		REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
		       (bp->rx_quick_cons_trip_int << 16) |
		       bp->rx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
		       (bp->rx_ticks_int << 16) | bp->rx_ticks);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* Re-enable DMA (disabled during reset on 5709). */
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  1, 0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
5052
c76c0475
MC
5053static void
5054bnx2_clear_ring_states(struct bnx2 *bp)
5055{
5056 struct bnx2_napi *bnapi;
35e9010b 5057 struct bnx2_tx_ring_info *txr;
bb4f98ab 5058 struct bnx2_rx_ring_info *rxr;
c76c0475
MC
5059 int i;
5060
5061 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5062 bnapi = &bp->bnx2_napi[i];
35e9010b 5063 txr = &bnapi->tx_ring;
bb4f98ab 5064 rxr = &bnapi->rx_ring;
c76c0475 5065
35e9010b
MC
5066 txr->tx_cons = 0;
5067 txr->hw_tx_cons = 0;
bb4f98ab
MC
5068 rxr->rx_prod_bseq = 0;
5069 rxr->rx_prod = 0;
5070 rxr->rx_cons = 0;
5071 rxr->rx_pg_prod = 0;
5072 rxr->rx_pg_cons = 0;
c76c0475
MC
5073 }
5074}
5075
/* Program the L2 TX context for connection @cid: context type/size, the
 * command type, and the 64-bit TX descriptor ring base address from
 * @txr.  The 5709 (Xinan) uses a different set of context offsets than
 * earlier chips.
 */
static void
bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
{
	u32 val, offset0, offset1, offset2, offset3;
	u32 cid_addr = GET_CID_ADDR(cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	bnx2_ctx_wr(bp, cid_addr, offset0, val);

	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	bnx2_ctx_wr(bp, cid_addr, offset1, val);

	/* Descriptor ring base, split into high and low 32 bits. */
	val = (u64) txr->tx_desc_mapping >> 32;
	bnx2_ctx_wr(bp, cid_addr, offset2, val);

	val = (u64) txr->tx_desc_mapping & 0xffffffff;
	bnx2_ctx_wr(bp, cid_addr, offset3, val);
}
b6016b76
MC
5105
/* Initialize TX ring @ring_num: pick its connection ID (TX_CID for ring
 * 0, TX_TSS_CID onward for the rest), make the last descriptor chain
 * back to the ring base, reset the software producer state, compute the
 * mailbox doorbell addresses, and program the hardware TX context.
 */
static void
bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
{
	struct tx_bd *txbd;
	u32 cid = TX_CID;
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;

	bnapi = &bp->bnx2_napi[ring_num];
	txr = &bnapi->tx_ring;

	if (ring_num == 0)
		cid = TX_CID;
	else
		cid = TX_TSS_CID + ring_num - 1;

	/* Wake the queue once half the ring has drained. */
	bp->tx_wake_thresh = bp->tx_ring_size / 2;

	/* Last BD points back at the start of the ring. */
	txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];

	txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;

	txr->tx_prod = 0;
	txr->tx_prod_bseq = 0;

	/* Mailbox addresses for the producer index and byte sequence. */
	txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
	txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;

	bnx2_init_tx_context(bp, cid, txr);
}
5137
/* Initialize a chained set of RX buffer-descriptor pages.  Every BD on
 * every page gets @buf_size and START|END flags; the final BD of each
 * page is then overwritten to point at the next page's DMA address,
 * with the last page linking back to the first to close the ring.
 */
static void
bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
		     int num_rings)
{
	int i;
	struct rx_bd *rxbd;

	for (i = 0; i < num_rings; i++) {
		int j;

		rxbd = &rx_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = buf_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		/* Chain to the next page; wrap on the last one. */
		if (i == (num_rings - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
	}
}
5161
5162static void
bb4f98ab 5163bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5d5d0015
MC
5164{
5165 int i;
5166 u16 prod, ring_prod;
bb4f98ab
MC
5167 u32 cid, rx_cid_addr, val;
5168 struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5169 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5170
5171 if (ring_num == 0)
5172 cid = RX_CID;
5173 else
5174 cid = RX_RSS_CID + ring_num - 1;
5175
5176 rx_cid_addr = GET_CID_ADDR(cid);
5d5d0015 5177
bb4f98ab 5178 bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5d5d0015
MC
5179 bp->rx_buf_use_size, bp->rx_max_ring);
5180
bb4f98ab 5181 bnx2_init_rx_context(bp, cid);
83e3fc89
MC
5182
5183 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5184 val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
5185 REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5186 }
5187
62a8313c 5188 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
47bf4246 5189 if (bp->rx_pg_ring_size) {
bb4f98ab
MC
5190 bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5191 rxr->rx_pg_desc_mapping,
47bf4246
MC
5192 PAGE_SIZE, bp->rx_max_pg_ring);
5193 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
62a8313c
MC
5194 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5195 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5e9ad9e1 5196 BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
47bf4246 5197
bb4f98ab 5198 val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
62a8313c 5199 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
47bf4246 5200
bb4f98ab 5201 val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
62a8313c 5202 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
47bf4246
MC
5203
5204 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5205 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5206 }
b6016b76 5207
bb4f98ab 5208 val = (u64) rxr->rx_desc_mapping[0] >> 32;
62a8313c 5209 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
b6016b76 5210
bb4f98ab 5211 val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
62a8313c 5212 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
b6016b76 5213
bb4f98ab 5214 ring_prod = prod = rxr->rx_pg_prod;
47bf4246 5215 for (i = 0; i < bp->rx_pg_ring_size; i++) {
a2df00aa 5216 if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
3a9c6a49
JP
5217 netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5218 ring_num, i, bp->rx_pg_ring_size);
47bf4246 5219 break;
b929e53c 5220 }
47bf4246
MC
5221 prod = NEXT_RX_BD(prod);
5222 ring_prod = RX_PG_RING_IDX(prod);
5223 }
bb4f98ab 5224 rxr->rx_pg_prod = prod;
47bf4246 5225
bb4f98ab 5226 ring_prod = prod = rxr->rx_prod;
236b6394 5227 for (i = 0; i < bp->rx_ring_size; i++) {
a2df00aa 5228 if (bnx2_alloc_rx_skb(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
3a9c6a49
JP
5229 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5230 ring_num, i, bp->rx_ring_size);
b6016b76 5231 break;
b929e53c 5232 }
b6016b76
MC
5233 prod = NEXT_RX_BD(prod);
5234 ring_prod = RX_RING_IDX(prod);
5235 }
bb4f98ab 5236 rxr->rx_prod = prod;
b6016b76 5237
bb4f98ab
MC
5238 rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5239 rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5240 rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
b6016b76 5241
bb4f98ab
MC
5242 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5243 REG_WR16(bp, rxr->rx_bidx_addr, prod);
5244
5245 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
b6016b76
MC
5246}
5247
35e9010b
MC
5248static void
5249bnx2_init_all_rings(struct bnx2 *bp)
5250{
5251 int i;
5e9ad9e1 5252 u32 val;
35e9010b
MC
5253
5254 bnx2_clear_ring_states(bp);
5255
5256 REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5257 for (i = 0; i < bp->num_tx_rings; i++)
5258 bnx2_init_tx_ring(bp, i);
5259
5260 if (bp->num_tx_rings > 1)
5261 REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5262 (TX_TSS_CID << 7));
5263
5e9ad9e1
MC
5264 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5265 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5266
bb4f98ab
MC
5267 for (i = 0; i < bp->num_rx_rings; i++)
5268 bnx2_init_rx_ring(bp, i);
5e9ad9e1
MC
5269
5270 if (bp->num_rx_rings > 1) {
22fa159d 5271 u32 tbl_32 = 0;
5e9ad9e1
MC
5272
5273 for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
22fa159d
MC
5274 int shift = (i % 8) << 2;
5275
5276 tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
5277 if ((i % 8) == 7) {
5278 REG_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
5279 REG_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
5280 BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
5281 BNX2_RLUP_RSS_COMMAND_WRITE |
5282 BNX2_RLUP_RSS_COMMAND_HASH_MASK);
5283 tbl_32 = 0;
5284 }
5e9ad9e1
MC
5285 }
5286
5287 val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5288 BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5289
5290 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5291
5292 }
35e9010b
MC
5293}
5294
5d5d0015 5295static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
13daffa2 5296{
5d5d0015 5297 u32 max, num_rings = 1;
13daffa2 5298
5d5d0015
MC
5299 while (ring_size > MAX_RX_DESC_CNT) {
5300 ring_size -= MAX_RX_DESC_CNT;
13daffa2
MC
5301 num_rings++;
5302 }
5303 /* round to next power of 2 */
5d5d0015 5304 max = max_size;
13daffa2
MC
5305 while ((max & num_rings) == 0)
5306 max >>= 1;
5307
5308 if (num_rings != max)
5309 max <<= 1;
5310
5d5d0015
MC
5311 return max;
5312}
5313
5314static void
5315bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5316{
84eaa187 5317 u32 rx_size, rx_space, jumbo_size;
5d5d0015
MC
5318
5319 /* 8 for CRC and VLAN */
d89cb6af 5320 rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5d5d0015 5321
84eaa187
MC
5322 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5323 sizeof(struct skb_shared_info);
5324
601d3d18 5325 bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
47bf4246
MC
5326 bp->rx_pg_ring_size = 0;
5327 bp->rx_max_pg_ring = 0;
5328 bp->rx_max_pg_ring_idx = 0;
f86e82fb 5329 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
84eaa187
MC
5330 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5331
5332 jumbo_size = size * pages;
5333 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
5334 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
5335
5336 bp->rx_pg_ring_size = jumbo_size;
5337 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5338 MAX_RX_PG_RINGS);
5339 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
601d3d18 5340 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
84eaa187
MC
5341 bp->rx_copy_thresh = 0;
5342 }
5d5d0015
MC
5343
5344 bp->rx_buf_use_size = rx_size;
5345 /* hw alignment */
5346 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
d89cb6af 5347 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5d5d0015
MC
5348 bp->rx_ring_size = size;
5349 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
13daffa2
MC
5350 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
5351}
5352
b6016b76
MC
5353static void
5354bnx2_free_tx_skbs(struct bnx2 *bp)
5355{
5356 int i;
5357
35e9010b
MC
5358 for (i = 0; i < bp->num_tx_rings; i++) {
5359 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5360 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5361 int j;
b6016b76 5362
35e9010b 5363 if (txr->tx_buf_ring == NULL)
b6016b76 5364 continue;
b6016b76 5365
35e9010b 5366 for (j = 0; j < TX_DESC_CNT; ) {
3d16af86 5367 struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
35e9010b 5368 struct sk_buff *skb = tx_buf->skb;
e95524a7 5369 int k, last;
35e9010b
MC
5370
5371 if (skb == NULL) {
5372 j++;
5373 continue;
5374 }
5375
36227e88 5376 dma_unmap_single(&bp->pdev->dev,
1a4ccc2d 5377 dma_unmap_addr(tx_buf, mapping),
e95524a7
AD
5378 skb_headlen(skb),
5379 PCI_DMA_TODEVICE);
b6016b76 5380
35e9010b 5381 tx_buf->skb = NULL;
b6016b76 5382
e95524a7
AD
5383 last = tx_buf->nr_frags;
5384 j++;
5385 for (k = 0; k < last; k++, j++) {
5386 tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
36227e88 5387 dma_unmap_page(&bp->pdev->dev,
1a4ccc2d 5388 dma_unmap_addr(tx_buf, mapping),
9e903e08 5389 skb_frag_size(&skb_shinfo(skb)->frags[k]),
e95524a7
AD
5390 PCI_DMA_TODEVICE);
5391 }
35e9010b 5392 dev_kfree_skb(skb);
b6016b76 5393 }
b6016b76 5394 }
b6016b76
MC
5395}
5396
5397static void
5398bnx2_free_rx_skbs(struct bnx2 *bp)
5399{
5400 int i;
5401
bb4f98ab
MC
5402 for (i = 0; i < bp->num_rx_rings; i++) {
5403 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5404 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5405 int j;
b6016b76 5406
bb4f98ab
MC
5407 if (rxr->rx_buf_ring == NULL)
5408 return;
b6016b76 5409
bb4f98ab
MC
5410 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5411 struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5412 struct sk_buff *skb = rx_buf->skb;
b6016b76 5413
bb4f98ab
MC
5414 if (skb == NULL)
5415 continue;
b6016b76 5416
36227e88 5417 dma_unmap_single(&bp->pdev->dev,
1a4ccc2d 5418 dma_unmap_addr(rx_buf, mapping),
bb4f98ab
MC
5419 bp->rx_buf_use_size,
5420 PCI_DMA_FROMDEVICE);
b6016b76 5421
bb4f98ab
MC
5422 rx_buf->skb = NULL;
5423
5424 dev_kfree_skb(skb);
5425 }
5426 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5427 bnx2_free_rx_page(bp, rxr, j);
b6016b76
MC
5428 }
5429}
5430
/* Release every skb the driver holds, TX first, then RX. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5437
5438static int
5439bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5440{
5441 int rc;
5442
5443 rc = bnx2_reset_chip(bp, reset_code);
5444 bnx2_free_skbs(bp);
5445 if (rc)
5446 return rc;
5447
fba9fe91
MC
5448 if ((rc = bnx2_init_chip(bp)) != 0)
5449 return rc;
5450
35e9010b 5451 bnx2_init_all_rings(bp);
b6016b76
MC
5452 return 0;
5453}
5454
5455static int
9a120bc5 5456bnx2_init_nic(struct bnx2 *bp, int reset_phy)
b6016b76
MC
5457{
5458 int rc;
5459
5460 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5461 return rc;
5462
80be4434 5463 spin_lock_bh(&bp->phy_lock);
9a120bc5 5464 bnx2_init_phy(bp, reset_phy);
b6016b76 5465 bnx2_set_link(bp);
543a827d
MC
5466 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5467 bnx2_remote_phy_event(bp);
0d8a6571 5468 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
5469 return 0;
5470}
5471
74bf4ba3
MC
5472static int
5473bnx2_shutdown_chip(struct bnx2 *bp)
5474{
5475 u32 reset_code;
5476
5477 if (bp->flags & BNX2_FLAG_NO_WOL)
5478 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5479 else if (bp->wol)
5480 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5481 else
5482 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5483
5484 return bnx2_reset_chip(bp, reset_code);
5485}
5486
b6016b76
MC
5487static int
5488bnx2_test_registers(struct bnx2 *bp)
5489{
5490 int ret;
5bae30c9 5491 int i, is_5709;
f71e1309 5492 static const struct {
b6016b76
MC
5493 u16 offset;
5494 u16 flags;
5bae30c9 5495#define BNX2_FL_NOT_5709 1
b6016b76
MC
5496 u32 rw_mask;
5497 u32 ro_mask;
5498 } reg_tbl[] = {
5499 { 0x006c, 0, 0x00000000, 0x0000003f },
5500 { 0x0090, 0, 0xffffffff, 0x00000000 },
5501 { 0x0094, 0, 0x00000000, 0x00000000 },
5502
5bae30c9
MC
5503 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5504 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5505 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5506 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5507 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5508 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5509 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5510 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5511 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5512
5513 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5514 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5515 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5516 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5517 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5518 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5519
5520 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5521 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5522 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
b6016b76
MC
5523
5524 { 0x1000, 0, 0x00000000, 0x00000001 },
15b169cc 5525 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
b6016b76
MC
5526
5527 { 0x1408, 0, 0x01c00800, 0x00000000 },
5528 { 0x149c, 0, 0x8000ffff, 0x00000000 },
5529 { 0x14a8, 0, 0x00000000, 0x000001ff },
5b0c76ad 5530 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
b6016b76
MC
5531 { 0x14b0, 0, 0x00000002, 0x00000001 },
5532 { 0x14b8, 0, 0x00000000, 0x00000000 },
5533 { 0x14c0, 0, 0x00000000, 0x00000009 },
5534 { 0x14c4, 0, 0x00003fff, 0x00000000 },
5535 { 0x14cc, 0, 0x00000000, 0x00000001 },
5536 { 0x14d0, 0, 0xffffffff, 0x00000000 },
b6016b76
MC
5537
5538 { 0x1800, 0, 0x00000000, 0x00000001 },
5539 { 0x1804, 0, 0x00000000, 0x00000003 },
b6016b76
MC
5540
5541 { 0x2800, 0, 0x00000000, 0x00000001 },
5542 { 0x2804, 0, 0x00000000, 0x00003f01 },
5543 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5544 { 0x2810, 0, 0xffff0000, 0x00000000 },
5545 { 0x2814, 0, 0xffff0000, 0x00000000 },
5546 { 0x2818, 0, 0xffff0000, 0x00000000 },
5547 { 0x281c, 0, 0xffff0000, 0x00000000 },
5548 { 0x2834, 0, 0xffffffff, 0x00000000 },
5549 { 0x2840, 0, 0x00000000, 0xffffffff },
5550 { 0x2844, 0, 0x00000000, 0xffffffff },
5551 { 0x2848, 0, 0xffffffff, 0x00000000 },
5552 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
5553
5554 { 0x2c00, 0, 0x00000000, 0x00000011 },
5555 { 0x2c04, 0, 0x00000000, 0x00030007 },
5556
b6016b76
MC
5557 { 0x3c00, 0, 0x00000000, 0x00000001 },
5558 { 0x3c04, 0, 0x00000000, 0x00070000 },
5559 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
5560 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5561 { 0x3c10, 0, 0xffffffff, 0x00000000 },
5562 { 0x3c14, 0, 0x00000000, 0xffffffff },
5563 { 0x3c18, 0, 0x00000000, 0xffffffff },
5564 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
5565 { 0x3c20, 0, 0xffffff00, 0x00000000 },
b6016b76
MC
5566
5567 { 0x5004, 0, 0x00000000, 0x0000007f },
5568 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
b6016b76 5569
b6016b76
MC
5570 { 0x5c00, 0, 0x00000000, 0x00000001 },
5571 { 0x5c04, 0, 0x00000000, 0x0003000f },
5572 { 0x5c08, 0, 0x00000003, 0x00000000 },
5573 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5574 { 0x5c10, 0, 0x00000000, 0xffffffff },
5575 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5576 { 0x5c84, 0, 0x00000000, 0x0000f333 },
5577 { 0x5c88, 0, 0x00000000, 0x00077373 },
5578 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
5579
5580 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
5581 { 0x680c, 0, 0xffffffff, 0x00000000 },
5582 { 0x6810, 0, 0xffffffff, 0x00000000 },
5583 { 0x6814, 0, 0xffffffff, 0x00000000 },
5584 { 0x6818, 0, 0xffffffff, 0x00000000 },
5585 { 0x681c, 0, 0xffffffff, 0x00000000 },
5586 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
5587 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
5588 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
5589 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
5590 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5591 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5592 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5593 { 0x683c, 0, 0x0000ffff, 0x00000000 },
5594 { 0x6840, 0, 0x00000ff0, 0x00000000 },
5595 { 0x6844, 0, 0x00ffff00, 0x00000000 },
5596 { 0x684c, 0, 0xffffffff, 0x00000000 },
5597 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5598 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5599 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5600 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5601 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5602 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5603
5604 { 0xffff, 0, 0x00000000, 0x00000000 },
5605 };
5606
5607 ret = 0;
5bae30c9
MC
5608 is_5709 = 0;
5609 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5610 is_5709 = 1;
5611
b6016b76
MC
5612 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5613 u32 offset, rw_mask, ro_mask, save_val, val;
5bae30c9
MC
5614 u16 flags = reg_tbl[i].flags;
5615
5616 if (is_5709 && (flags & BNX2_FL_NOT_5709))
5617 continue;
b6016b76
MC
5618
5619 offset = (u32) reg_tbl[i].offset;
5620 rw_mask = reg_tbl[i].rw_mask;
5621 ro_mask = reg_tbl[i].ro_mask;
5622
14ab9b86 5623 save_val = readl(bp->regview + offset);
b6016b76 5624
14ab9b86 5625 writel(0, bp->regview + offset);
b6016b76 5626
14ab9b86 5627 val = readl(bp->regview + offset);
b6016b76
MC
5628 if ((val & rw_mask) != 0) {
5629 goto reg_test_err;
5630 }
5631
5632 if ((val & ro_mask) != (save_val & ro_mask)) {
5633 goto reg_test_err;
5634 }
5635
14ab9b86 5636 writel(0xffffffff, bp->regview + offset);
b6016b76 5637
14ab9b86 5638 val = readl(bp->regview + offset);
b6016b76
MC
5639 if ((val & rw_mask) != rw_mask) {
5640 goto reg_test_err;
5641 }
5642
5643 if ((val & ro_mask) != (save_val & ro_mask)) {
5644 goto reg_test_err;
5645 }
5646
14ab9b86 5647 writel(save_val, bp->regview + offset);
b6016b76
MC
5648 continue;
5649
5650reg_test_err:
14ab9b86 5651 writel(save_val, bp->regview + offset);
b6016b76
MC
5652 ret = -ENODEV;
5653 break;
5654 }
5655 return ret;
5656}
5657
5658static int
5659bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5660{
f71e1309 5661 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
b6016b76
MC
5662 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5663 int i;
5664
5665 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5666 u32 offset;
5667
5668 for (offset = 0; offset < size; offset += 4) {
5669
2726d6e1 5670 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
b6016b76 5671
2726d6e1 5672 if (bnx2_reg_rd_ind(bp, start + offset) !=
b6016b76
MC
5673 test_pattern[i]) {
5674 return -ENODEV;
5675 }
5676 }
5677 }
5678 return 0;
5679}
5680
5681static int
5682bnx2_test_memory(struct bnx2 *bp)
5683{
5684 int ret = 0;
5685 int i;
5bae30c9 5686 static struct mem_entry {
b6016b76
MC
5687 u32 offset;
5688 u32 len;
5bae30c9 5689 } mem_tbl_5706[] = {
b6016b76 5690 { 0x60000, 0x4000 },
5b0c76ad 5691 { 0xa0000, 0x3000 },
b6016b76
MC
5692 { 0xe0000, 0x4000 },
5693 { 0x120000, 0x4000 },
5694 { 0x1a0000, 0x4000 },
5695 { 0x160000, 0x4000 },
5696 { 0xffffffff, 0 },
5bae30c9
MC
5697 },
5698 mem_tbl_5709[] = {
5699 { 0x60000, 0x4000 },
5700 { 0xa0000, 0x3000 },
5701 { 0xe0000, 0x4000 },
5702 { 0x120000, 0x4000 },
5703 { 0x1a0000, 0x4000 },
5704 { 0xffffffff, 0 },
b6016b76 5705 };
5bae30c9
MC
5706 struct mem_entry *mem_tbl;
5707
5708 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5709 mem_tbl = mem_tbl_5709;
5710 else
5711 mem_tbl = mem_tbl_5706;
b6016b76
MC
5712
5713 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5714 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5715 mem_tbl[i].len)) != 0) {
5716 return ret;
5717 }
5718 }
6aa20a22 5719
b6016b76
MC
5720 return ret;
5721}
5722
bc5a0690
MC
5723#define BNX2_MAC_LOOPBACK 0
5724#define BNX2_PHY_LOOPBACK 1
5725
b6016b76 5726static int
bc5a0690 5727bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
b6016b76
MC
5728{
5729 unsigned int pkt_size, num_pkts, i;
5730 struct sk_buff *skb, *rx_skb;
5731 unsigned char *packet;
bc5a0690 5732 u16 rx_start_idx, rx_idx;
b6016b76
MC
5733 dma_addr_t map;
5734 struct tx_bd *txbd;
5735 struct sw_bd *rx_buf;
5736 struct l2_fhdr *rx_hdr;
5737 int ret = -ENODEV;
c76c0475 5738 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
35e9010b 5739 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
bb4f98ab 5740 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
c76c0475
MC
5741
5742 tx_napi = bnapi;
b6016b76 5743
35e9010b 5744 txr = &tx_napi->tx_ring;
bb4f98ab 5745 rxr = &bnapi->rx_ring;
bc5a0690
MC
5746 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5747 bp->loopback = MAC_LOOPBACK;
5748 bnx2_set_mac_loopback(bp);
5749 }
5750 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
583c28e5 5751 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
489310a4
MC
5752 return 0;
5753
80be4434 5754 bp->loopback = PHY_LOOPBACK;
bc5a0690
MC
5755 bnx2_set_phy_loopback(bp);
5756 }
5757 else
5758 return -EINVAL;
b6016b76 5759
84eaa187 5760 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
932f3772 5761 skb = netdev_alloc_skb(bp->dev, pkt_size);
b6cbc3b6
JL
5762 if (!skb)
5763 return -ENOMEM;
b6016b76 5764 packet = skb_put(skb, pkt_size);
6634292b 5765 memcpy(packet, bp->dev->dev_addr, 6);
b6016b76
MC
5766 memset(packet + 6, 0x0, 8);
5767 for (i = 14; i < pkt_size; i++)
5768 packet[i] = (unsigned char) (i & 0xff);
5769
36227e88
SG
5770 map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
5771 PCI_DMA_TODEVICE);
5772 if (dma_mapping_error(&bp->pdev->dev, map)) {
3d16af86
BL
5773 dev_kfree_skb(skb);
5774 return -EIO;
5775 }
b6016b76 5776
bf5295bb
MC
5777 REG_WR(bp, BNX2_HC_COMMAND,
5778 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5779
b6016b76
MC
5780 REG_RD(bp, BNX2_HC_COMMAND);
5781
5782 udelay(5);
35efa7c1 5783 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
b6016b76 5784
b6016b76
MC
5785 num_pkts = 0;
5786
35e9010b 5787 txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
b6016b76
MC
5788
5789 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5790 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5791 txbd->tx_bd_mss_nbytes = pkt_size;
5792 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5793
5794 num_pkts++;
35e9010b
MC
5795 txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5796 txr->tx_prod_bseq += pkt_size;
b6016b76 5797
35e9010b
MC
5798 REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5799 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
b6016b76
MC
5800
5801 udelay(100);
5802
bf5295bb
MC
5803 REG_WR(bp, BNX2_HC_COMMAND,
5804 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5805
b6016b76
MC
5806 REG_RD(bp, BNX2_HC_COMMAND);
5807
5808 udelay(5);
5809
36227e88 5810 dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
745720e5 5811 dev_kfree_skb(skb);
b6016b76 5812
35e9010b 5813 if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
b6016b76 5814 goto loopback_test_done;
b6016b76 5815
35efa7c1 5816 rx_idx = bnx2_get_hw_rx_cons(bnapi);
b6016b76
MC
5817 if (rx_idx != rx_start_idx + num_pkts) {
5818 goto loopback_test_done;
5819 }
5820
bb4f98ab 5821 rx_buf = &rxr->rx_buf_ring[rx_start_idx];
b6016b76
MC
5822 rx_skb = rx_buf->skb;
5823
a33fa66b 5824 rx_hdr = rx_buf->desc;
d89cb6af 5825 skb_reserve(rx_skb, BNX2_RX_OFFSET);
b6016b76 5826
36227e88 5827 dma_sync_single_for_cpu(&bp->pdev->dev,
1a4ccc2d 5828 dma_unmap_addr(rx_buf, mapping),
b6016b76
MC
5829 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5830
ade2bfe7 5831 if (rx_hdr->l2_fhdr_status &
b6016b76
MC
5832 (L2_FHDR_ERRORS_BAD_CRC |
5833 L2_FHDR_ERRORS_PHY_DECODE |
5834 L2_FHDR_ERRORS_ALIGNMENT |
5835 L2_FHDR_ERRORS_TOO_SHORT |
5836 L2_FHDR_ERRORS_GIANT_FRAME)) {
5837
5838 goto loopback_test_done;
5839 }
5840
5841 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5842 goto loopback_test_done;
5843 }
5844
5845 for (i = 14; i < pkt_size; i++) {
5846 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5847 goto loopback_test_done;
5848 }
5849 }
5850
5851 ret = 0;
5852
5853loopback_test_done:
5854 bp->loopback = 0;
5855 return ret;
5856}
5857
bc5a0690
MC
5858#define BNX2_MAC_LOOPBACK_FAILED 1
5859#define BNX2_PHY_LOOPBACK_FAILED 2
5860#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5861 BNX2_PHY_LOOPBACK_FAILED)
5862
5863static int
5864bnx2_test_loopback(struct bnx2 *bp)
5865{
5866 int rc = 0;
5867
5868 if (!netif_running(bp->dev))
5869 return BNX2_LOOPBACK_FAILED;
5870
5871 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5872 spin_lock_bh(&bp->phy_lock);
9a120bc5 5873 bnx2_init_phy(bp, 1);
bc5a0690
MC
5874 spin_unlock_bh(&bp->phy_lock);
5875 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5876 rc |= BNX2_MAC_LOOPBACK_FAILED;
5877 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5878 rc |= BNX2_PHY_LOOPBACK_FAILED;
5879 return rc;
5880}
5881
b6016b76
MC
5882#define NVRAM_SIZE 0x200
5883#define CRC32_RESIDUAL 0xdebb20e3
5884
5885static int
5886bnx2_test_nvram(struct bnx2 *bp)
5887{
b491edd5 5888 __be32 buf[NVRAM_SIZE / 4];
b6016b76
MC
5889 u8 *data = (u8 *) buf;
5890 int rc = 0;
5891 u32 magic, csum;
5892
5893 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5894 goto test_nvram_done;
5895
5896 magic = be32_to_cpu(buf[0]);
5897 if (magic != 0x669955aa) {
5898 rc = -ENODEV;
5899 goto test_nvram_done;
5900 }
5901
5902 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5903 goto test_nvram_done;
5904
5905 csum = ether_crc_le(0x100, data);
5906 if (csum != CRC32_RESIDUAL) {
5907 rc = -ENODEV;
5908 goto test_nvram_done;
5909 }
5910
5911 csum = ether_crc_le(0x100, data + 0x100);
5912 if (csum != CRC32_RESIDUAL) {
5913 rc = -ENODEV;
5914 }
5915
5916test_nvram_done:
5917 return rc;
5918}
5919
5920static int
5921bnx2_test_link(struct bnx2 *bp)
5922{
5923 u32 bmsr;
5924
9f52b564
MC
5925 if (!netif_running(bp->dev))
5926 return -ENODEV;
5927
583c28e5 5928 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
489310a4
MC
5929 if (bp->link_up)
5930 return 0;
5931 return -ENODEV;
5932 }
c770a65c 5933 spin_lock_bh(&bp->phy_lock);
27a005b8
MC
5934 bnx2_enable_bmsr1(bp);
5935 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5936 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5937 bnx2_disable_bmsr1(bp);
c770a65c 5938 spin_unlock_bh(&bp->phy_lock);
6aa20a22 5939
b6016b76
MC
5940 if (bmsr & BMSR_LSTATUS) {
5941 return 0;
5942 }
5943 return -ENODEV;
5944}
5945
5946static int
5947bnx2_test_intr(struct bnx2 *bp)
5948{
5949 int i;
b6016b76
MC
5950 u16 status_idx;
5951
5952 if (!netif_running(bp->dev))
5953 return -ENODEV;
5954
5955 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5956
5957 /* This register is not touched during run-time. */
bf5295bb 5958 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
b6016b76
MC
5959 REG_RD(bp, BNX2_HC_COMMAND);
5960
5961 for (i = 0; i < 10; i++) {
5962 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5963 status_idx) {
5964
5965 break;
5966 }
5967
5968 msleep_interruptible(10);
5969 }
5970 if (i < 10)
5971 return 0;
5972
5973 return -ENODEV;
5974}
5975
38ea3686 5976/* Determining link for parallel detection. */
b2fadeae
MC
5977static int
5978bnx2_5706_serdes_has_link(struct bnx2 *bp)
5979{
5980 u32 mode_ctl, an_dbg, exp;
5981
38ea3686
MC
5982 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5983 return 0;
5984
b2fadeae
MC
5985 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5986 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5987
5988 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5989 return 0;
5990
5991 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5992 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5993 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5994
f3014c0c 5995 if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
b2fadeae
MC
5996 return 0;
5997
5998 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
5999 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6000 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6001
6002 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
6003 return 0;
6004
6005 return 1;
6006}
6007
b6016b76 6008static void
48b01e2d 6009bnx2_5706_serdes_timer(struct bnx2 *bp)
b6016b76 6010{
b2fadeae
MC
6011 int check_link = 1;
6012
48b01e2d 6013 spin_lock(&bp->phy_lock);
b2fadeae 6014 if (bp->serdes_an_pending) {
48b01e2d 6015 bp->serdes_an_pending--;
b2fadeae
MC
6016 check_link = 0;
6017 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
48b01e2d 6018 u32 bmcr;
b6016b76 6019
ac392abc 6020 bp->current_interval = BNX2_TIMER_INTERVAL;
cd339a0e 6021
ca58c3af 6022 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76 6023
48b01e2d 6024 if (bmcr & BMCR_ANENABLE) {
b2fadeae 6025 if (bnx2_5706_serdes_has_link(bp)) {
48b01e2d
MC
6026 bmcr &= ~BMCR_ANENABLE;
6027 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
ca58c3af 6028 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
583c28e5 6029 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
48b01e2d 6030 }
b6016b76 6031 }
48b01e2d
MC
6032 }
6033 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
583c28e5 6034 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
48b01e2d 6035 u32 phy2;
b6016b76 6036
48b01e2d
MC
6037 bnx2_write_phy(bp, 0x17, 0x0f01);
6038 bnx2_read_phy(bp, 0x15, &phy2);
6039 if (phy2 & 0x20) {
6040 u32 bmcr;
cd339a0e 6041
ca58c3af 6042 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
48b01e2d 6043 bmcr |= BMCR_ANENABLE;
ca58c3af 6044 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
b6016b76 6045
583c28e5 6046 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
48b01e2d
MC
6047 }
6048 } else
ac392abc 6049 bp->current_interval = BNX2_TIMER_INTERVAL;
b6016b76 6050
a2724e25 6051 if (check_link) {
b2fadeae
MC
6052 u32 val;
6053
6054 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6055 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6056 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6057
a2724e25
MC
6058 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6059 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6060 bnx2_5706s_force_link_dn(bp, 1);
6061 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6062 } else
6063 bnx2_set_link(bp);
6064 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6065 bnx2_set_link(bp);
b2fadeae 6066 }
48b01e2d
MC
6067 spin_unlock(&bp->phy_lock);
6068}
b6016b76 6069
f8dd064e
MC
6070static void
6071bnx2_5708_serdes_timer(struct bnx2 *bp)
6072{
583c28e5 6073 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
0d8a6571
MC
6074 return;
6075
583c28e5 6076 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
f8dd064e
MC
6077 bp->serdes_an_pending = 0;
6078 return;
6079 }
b6016b76 6080
f8dd064e
MC
6081 spin_lock(&bp->phy_lock);
6082 if (bp->serdes_an_pending)
6083 bp->serdes_an_pending--;
6084 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6085 u32 bmcr;
b6016b76 6086
ca58c3af 6087 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
f8dd064e 6088 if (bmcr & BMCR_ANENABLE) {
605a9e20 6089 bnx2_enable_forced_2g5(bp);
40105c0b 6090 bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
f8dd064e 6091 } else {
605a9e20 6092 bnx2_disable_forced_2g5(bp);
f8dd064e 6093 bp->serdes_an_pending = 2;
ac392abc 6094 bp->current_interval = BNX2_TIMER_INTERVAL;
b6016b76 6095 }
b6016b76 6096
f8dd064e 6097 } else
ac392abc 6098 bp->current_interval = BNX2_TIMER_INTERVAL;
b6016b76 6099
f8dd064e
MC
6100 spin_unlock(&bp->phy_lock);
6101}
6102
48b01e2d
MC
6103static void
6104bnx2_timer(unsigned long data)
6105{
6106 struct bnx2 *bp = (struct bnx2 *) data;
b6016b76 6107
48b01e2d
MC
6108 if (!netif_running(bp->dev))
6109 return;
b6016b76 6110
48b01e2d
MC
6111 if (atomic_read(&bp->intr_sem) != 0)
6112 goto bnx2_restart_timer;
b6016b76 6113
efba0180
MC
6114 if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6115 BNX2_FLAG_USING_MSI)
6116 bnx2_chk_missed_msi(bp);
6117
df149d70 6118 bnx2_send_heart_beat(bp);
b6016b76 6119
2726d6e1
MC
6120 bp->stats_blk->stat_FwRxDrop =
6121 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
b6016b76 6122
02537b06 6123 /* workaround occasional corrupted counters */
61d9e3fa 6124 if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
02537b06
MC
6125 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6126 BNX2_HC_COMMAND_STATS_NOW);
6127
583c28e5 6128 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
f8dd064e
MC
6129 if (CHIP_NUM(bp) == CHIP_NUM_5706)
6130 bnx2_5706_serdes_timer(bp);
27a005b8 6131 else
f8dd064e 6132 bnx2_5708_serdes_timer(bp);
b6016b76
MC
6133 }
6134
6135bnx2_restart_timer:
cd339a0e 6136 mod_timer(&bp->timer, jiffies + bp->current_interval);
b6016b76
MC
6137}
6138
/* Request one IRQ per configured vector, passing the matching bnx2_napi
 * context as the handler cookie.  MSI/MSI-X vectors are exclusive; legacy
 * INTx is requested shared.  Returns 0 or the first request_irq() error;
 * vectors acquired before the failure keep irq->requested set so
 * __bnx2_free_irq() can release them.
 */
static int
bnx2_request_irq(struct bnx2 *bp)
{
	unsigned long flags;
	struct bnx2_irq *irq;
	int rc = 0, i;

	if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
		flags = 0;
	else
		flags = IRQF_SHARED;

	for (i = 0; i < bp->irq_nvecs; i++) {
		irq = &bp->irq_tbl[i];
		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
				 &bp->bnx2_napi[i]);
		if (rc)
			break;
		irq->requested = 1;
	}
	return rc;
}
6161
/* Free every IRQ previously acquired by bnx2_request_irq(), using the
 * same bnx2_napi cookie that was passed to request_irq().  Safe to call
 * after a partial request failure: only vectors with irq->requested set
 * are released.
 */
static void
__bnx2_free_irq(struct bnx2 *bp)
{
	struct bnx2_irq *irq;
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		irq = &bp->irq_tbl[i];
		if (irq->requested)
			free_irq(irq->vector, &bp->bnx2_napi[i]);
		irq->requested = 0;
	}
}
6175
/* Release all IRQs and then tear down MSI/MSI-X mode, clearing the
 * interrupt-mode flags so a subsequent bnx2_setup_int_mode() starts from
 * a clean INTx state.
 */
static void
bnx2_free_irq(struct bnx2 *bp)
{

	__bnx2_free_irq(bp);
	if (bp->flags & BNX2_FLAG_USING_MSI)
		pci_disable_msi(bp->pdev);
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		pci_disable_msix(bp->pdev);

	bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
}
6188
/* Try to switch the device into MSI-X mode with up to @msix_vecs vectors
 * (plus one extra for CNIC when built in).  On any failure the function
 * returns without setting BNX2_FLAG_USING_MSIX, leaving the caller to
 * fall back to MSI/INTx.
 */
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
	int i, total_vecs, rc;
	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
	struct net_device *dev = bp->dev;
	const int len = sizeof(bp->irq_tbl[0].name);

	/* Program the MSI-X table/PBA window registers before enabling. */
	bnx2_setup_msix_tbl(bp);
	REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
	REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
	REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

	/* Need to flush the previous three writes to ensure MSI-X
	 * is setup properly */
	REG_RD(bp, BNX2_PCI_MSIX_CONTROL);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	total_vecs = msix_vecs;
#ifdef BCM_CNIC
	total_vecs++;			/* one extra vector reserved for cnic */
#endif
	rc = -ENOSPC;
	/* pci_enable_msix() returns >0 with the number of vectors actually
	 * available; retry with that smaller count until it succeeds (0)
	 * or fails hard (<0).
	 */
	while (total_vecs >= BNX2_MIN_MSIX_VEC) {
		rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
		if (rc <= 0)
			break;
		if (rc > 0)
			total_vecs = rc;
	}

	if (rc != 0)
		return;

	msix_vecs = total_vecs;
#ifdef BCM_CNIC
	msix_vecs--;
#endif
	bp->irq_nvecs = msix_vecs;
	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
	for (i = 0; i < total_vecs; i++) {
		bp->irq_tbl[i].vector = msix_ent[i].vector;
		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
		bp->irq_tbl[i].handler = bnx2_msi_1shot;
	}
}
6239
/* Select the interrupt mode (MSI-X > MSI > INTx, subject to capability
 * flags and @dis_msi) and size the TX/RX rings to the number of vectors
 * obtained.  Always initializes irq_tbl[0] for the INTx fallback first.
 * Returns the result of netif_set_real_num_rx_queues().
 */
static int
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
	int cpus = num_online_cpus();
	/* One vector per CPU plus one, capped by the number of RX rings. */
	int msix_vecs = min(cpus + 1, RX_MAX_RINGS);

	bp->irq_tbl[0].handler = bnx2_interrupt;
	strcpy(bp->irq_tbl[0].name, bp->dev->name);
	bp->irq_nvecs = 1;
	bp->irq_tbl[0].vector = bp->pdev->irq;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
		bnx2_enable_msix(bp, msix_vecs);

	/* MSI is only tried when MSI-X was not enabled above. */
	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= BNX2_FLAG_USING_MSI;
			if (CHIP_NUM(bp) == CHIP_NUM_5709) {
				/* 5709 supports one-shot MSI. */
				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
				bp->irq_tbl[0].handler = bnx2_msi_1shot;
			} else
				bp->irq_tbl[0].handler = bnx2_msi;

			bp->irq_tbl[0].vector = bp->pdev->irq;
		}
	}

	/* TX ring count must be a power of two. */
	bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
	netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);

	bp->num_rx_rings = bp->irq_nvecs;
	return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
}
6274
/* Called with rtnl_lock */
/* ndo_open: bring the device up - load firmware, power up, pick the
 * interrupt mode, allocate rings, request IRQs and init the NIC.  If MSI
 * was enabled, verify it actually delivers an interrupt and fall back to
 * INTx when it does not.  On any failure everything acquired so far is
 * unwound at open_err.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	rc = bnx2_request_firmware(bp);
	if (rc < 0)
		goto out;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_setup_int_mode(bp, disable_msi);
	if (rc)
		goto open_err;
	bnx2_init_napi(bp);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	/* Chip resets erase HW stats; start with a clean software copy. */
	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* dis_msi=1 forces plain INTx. */
			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		netdev_info(dev, "using MSI\n");
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_info(dev, "using MSIX\n");

	netif_tx_start_all_queues(dev);
out:
	return rc;

open_err:
	/* Unwind in reverse order of acquisition. */
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	bnx2_release_firmware(bp);
	goto out;
}
6358
/* Workqueue handler scheduled from bnx2_tx_timeout(): quiesce the
 * interface, reinitialize the NIC and restart it.  If reinit fails the
 * device is closed.  Runs under rtnl_lock to serialize with open/close.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
	int rc;

	rtnl_lock();
	if (!netif_running(bp->dev)) {
		rtnl_unlock();
		return;
	}

	bnx2_netif_stop(bp, true);

	rc = bnx2_init_nic(bp, 1);
	if (rc) {
		netdev_err(bp->dev, "failed to reset NIC, closing\n");
		/* dev_close() expects NAPI enabled. */
		bnx2_napi_enable(bp);
		dev_close(bp->dev);
		rtnl_unlock();
		return;
	}

	/* Block interrupt handling until bnx2_netif_start() re-enables it. */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp, true);
	rtnl_unlock();
}
6386
/* Dump a snapshot of PCI config and key MAC/host-coalescing registers to
 * the kernel log; used from the TX timeout path for post-mortem debugging.
 */
static void
bnx2_dump_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
	u32 val1, val2;

	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
		   atomic_read(&bp->intr_sem), val1);
	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
		   REG_RD(bp, BNX2_EMAC_TX_STATUS),
		   REG_RD(bp, BNX2_EMAC_RX_STATUS));
	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
		   REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
		   REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_err(dev, "DEBUG: PBA[%08x]\n",
			   REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
}
6410
b6016b76
MC
6411static void
6412bnx2_tx_timeout(struct net_device *dev)
6413{
972ec0d4 6414 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6415
20175c57 6416 bnx2_dump_state(bp);
ecdbf6e0 6417 bnx2_dump_mcp_state(bp);
20175c57 6418
b6016b76
MC
6419 /* This allows the netif to be shutdown gracefully before resetting */
6420 schedule_work(&bp->reset_task);
6421}
6422
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
/* ndo_start_xmit: map the skb head and all frags for DMA, fill one TX
 * buffer descriptor per segment (with checksum/VLAN/LSO flags), then ring
 * the doorbell registers.  On a frag mapping failure everything mapped so
 * far is unwound and the skb is dropped (NETDEV_TX_OK, never requeued).
 */
static netdev_tx_t
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_tx_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;
	struct netdev_queue *txq;

	/* Determine which tx ring we will be placed on */
	i = skb_get_queue_mapping(skb);
	bnapi = &bp->bnx2_napi[i];
	txr = &bnapi->tx_ring;
	txq = netdev_get_tx_queue(dev, i);

	/* Should not happen: the queue is stopped before the ring fills. */
	if (unlikely(bnx2_tx_avail(bp, txr) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = txr->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}

	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* Encode the IPv6 extension-header offset (in
			 * 8-byte units) into the BD flag/mss fields.
			 */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			iph = ip_hdr(skb);
			/* IPv4: header length beyond the 5-word minimum
			 * plus TCP options, in 32-bit words.
			 */
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	dma_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &txr->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;
	tx_buf->nr_frags = last_frag;
	tx_buf->is_gso = skb_is_gso(skb);

	/* One BD per page fragment. */
	for (i = 0; i < last_frag; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &txr->tx_desc_ring[ring_prod];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(&bp->pdev->dev, mapping))
			goto dma_error;
		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
				   mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	txr->tx_prod_bseq += skb->len;

	/* Ring the doorbell: new producer index and byte sequence. */
	REG_WR16(bp, txr->tx_bidx_addr, prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	mmiowb();

	txr->tx_prod = prod;

	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in bnx2_tx_avail() below, because in
		 * bnx2_tx_int(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;
dma_error:
	/* save value of frag that failed */
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	ring_prod = TX_RING_IDX(prod);
	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = NULL;
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		tx_buf = &txr->tx_buf_ring[ring_prod];
		dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
6595
/* Called with rtnl_lock */
/* ndo_stop: quiesce interrupts and NAPI, stop the timer, shut the chip
 * down and release IRQs, buffers and memory, then drop to D3hot.
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
6615
/* Accumulate the current hardware statistics block into temp_stats_blk
 * before a chip reset erases it.  The first 10 counters are 64-bit
 * (stored as hi/lo 32-bit word pairs) and need manual carry handling;
 * the rest are plain 32-bit counters.
 */
static void
bnx2_save_stats(struct bnx2 *bp)
{
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
	int i;

	/* The 1st 10 counters are 64-bit counters */
	for (i = 0; i < 20; i += 2) {
		u32 hi;
		u64 lo;

		hi = temp_stats[i] + hw_stats[i];
		lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
		/* Propagate the carry from the low word into the high word. */
		if (lo > 0xffffffff)
			hi++;
		temp_stats[i] = hi;
		temp_stats[i + 1] = lo & 0xffffffff;
	}

	for ( ; i < sizeof(struct statistics_block) / 4; i++)
		temp_stats[i] += hw_stats[i];
}
6639
/* Combine a hi/lo 32-bit register pair into one 64-bit value. */
#define GET_64BIT_NET_STATS64(ctr)		\
	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))

/* 64-bit counter: live hardware value plus the saved pre-reset total. */
#define GET_64BIT_NET_STATS(ctr)		\
	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +	\
	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)

/* 32-bit counter: live hardware value plus the saved pre-reset total. */
#define GET_32BIT_NET_STATS(ctr)		\
	(unsigned long) (bp->stats_blk->ctr +	\
			 bp->temp_stats_blk->ctr)
/* ndo_get_stats64: translate the chip's statistics block (plus the saved
 * pre-reset totals in temp_stats_blk) into rtnl_link_stats64 fields.
 */
static struct rtnl_link_stats64 *
bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Stats block is only allocated while the device is up. */
	if (bp->stats_blk == NULL)
		return net_stats;

	net_stats->rx_packets =
		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCOutOctets);

	net_stats->multicast =
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);

	net_stats->collisions =
		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);

	net_stats->rx_length_errors =
		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);

	net_stats->rx_frame_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);

	net_stats->rx_crc_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);

	/* 5706 and 5708 A0 do not report carrier sense errors reliably. */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
	}

	net_stats->tx_errors =
		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
		GET_32BIT_NET_STATS(stat_FwRxDrop);

	return net_stats;
}
6723
/* All ethtool functions called with rtnl_lock */

/* ethtool get_settings: report supported/advertised modes based on the
 * PHY port type (fibre vs copper; both when a remote-PHY-capable part is
 * in use), and the current speed/duplex when the link is up.  Takes
 * phy_lock to read a consistent snapshot of the link state.
 */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;

	cmd->supported = SUPPORTED_Autoneg;
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
			cmd->supported |= SUPPORTED_2500baseX_Full;

	}
	if (support_copper) {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

	}

	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	} else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
		ethtool_cmd_speed_set(cmd, bp->line_speed);
		cmd->duplex = bp->duplex;
	}
	else {
		/* No link: speed/duplex are unknown. */
		ethtool_cmd_speed_set(cmd, -1);
		cmd->duplex = -1;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
6aa20a22 6783
b6016b76
MC
6784static int
6785bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6786{
972ec0d4 6787 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6788 u8 autoneg = bp->autoneg;
6789 u8 req_duplex = bp->req_duplex;
6790 u16 req_line_speed = bp->req_line_speed;
6791 u32 advertising = bp->advertising;
7b6b8347
MC
6792 int err = -EINVAL;
6793
6794 spin_lock_bh(&bp->phy_lock);
6795
6796 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6797 goto err_out_unlock;
6798
583c28e5
MC
6799 if (cmd->port != bp->phy_port &&
6800 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
7b6b8347 6801 goto err_out_unlock;
b6016b76 6802
d6b14486
MC
6803 /* If device is down, we can store the settings only if the user
6804 * is setting the currently active port.
6805 */
6806 if (!netif_running(dev) && cmd->port != bp->phy_port)
6807 goto err_out_unlock;
6808
b6016b76
MC
6809 if (cmd->autoneg == AUTONEG_ENABLE) {
6810 autoneg |= AUTONEG_SPEED;
6811
beb499af
MC
6812 advertising = cmd->advertising;
6813 if (cmd->port == PORT_TP) {
6814 advertising &= ETHTOOL_ALL_COPPER_SPEED;
6815 if (!advertising)
b6016b76 6816 advertising = ETHTOOL_ALL_COPPER_SPEED;
beb499af
MC
6817 } else {
6818 advertising &= ETHTOOL_ALL_FIBRE_SPEED;
6819 if (!advertising)
6820 advertising = ETHTOOL_ALL_FIBRE_SPEED;
b6016b76
MC
6821 }
6822 advertising |= ADVERTISED_Autoneg;
6823 }
6824 else {
25db0338 6825 u32 speed = ethtool_cmd_speed(cmd);
7b6b8347 6826 if (cmd->port == PORT_FIBRE) {
25db0338
DD
6827 if ((speed != SPEED_1000 &&
6828 speed != SPEED_2500) ||
80be4434 6829 (cmd->duplex != DUPLEX_FULL))
7b6b8347 6830 goto err_out_unlock;
80be4434 6831
25db0338 6832 if (speed == SPEED_2500 &&
583c28e5 6833 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
7b6b8347 6834 goto err_out_unlock;
25db0338 6835 } else if (speed == SPEED_1000 || speed == SPEED_2500)
7b6b8347
MC
6836 goto err_out_unlock;
6837
b6016b76 6838 autoneg &= ~AUTONEG_SPEED;
25db0338 6839 req_line_speed = speed;
b6016b76
MC
6840 req_duplex = cmd->duplex;
6841 advertising = 0;
6842 }
6843
6844 bp->autoneg = autoneg;
6845 bp->advertising = advertising;
6846 bp->req_line_speed = req_line_speed;
6847 bp->req_duplex = req_duplex;
6848
d6b14486
MC
6849 err = 0;
6850 /* If device is down, the new settings will be picked up when it is
6851 * brought up.
6852 */
6853 if (netif_running(dev))
6854 err = bnx2_setup_phy(bp, cmd->port);
b6016b76 6855
7b6b8347 6856err_out_unlock:
c770a65c 6857 spin_unlock_bh(&bp->phy_lock);
b6016b76 6858
7b6b8347 6859 return err;
b6016b76
MC
6860}
6861
6862static void
6863bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6864{
972ec0d4 6865 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6866
68aad78c
RJ
6867 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
6868 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
6869 strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
6870 strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
b6016b76
MC
6871}
6872
/* Size of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN (32 * 1024)

/* ethtool get_regs_len: fixed-size register dump. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
6880
/* ethtool get_regs: dump readable register ranges into @_p.  The
 * reg_boundaries table lists (start, end) pairs of readable regions;
 * gaps between regions are left zeroed (the buffer is pre-cleared) to
 * avoid reading unimplemented offsets.
 */
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	static const u32 reg_boundaries[] = {
		0x0000, 0x0098, 0x0400, 0x045c,
		0x0800, 0x0880, 0x0c00, 0x0c10,
		0x0c30, 0x0d08, 0x1000, 0x101c,
		0x1040, 0x1048, 0x1080, 0x10a4,
		0x1400, 0x1490, 0x1498, 0x14f0,
		0x1500, 0x155c, 0x1580, 0x15dc,
		0x1600, 0x1658, 0x1680, 0x16d8,
		0x1800, 0x1820, 0x1840, 0x1854,
		0x1880, 0x1894, 0x1900, 0x1984,
		0x1c00, 0x1c0c, 0x1c40, 0x1c54,
		0x1c80, 0x1c94, 0x1d00, 0x1d84,
		0x2000, 0x2030, 0x23c0, 0x2400,
		0x2800, 0x2820, 0x2830, 0x2850,
		0x2b40, 0x2c10, 0x2fc0, 0x3058,
		0x3c00, 0x3c94, 0x4000, 0x4010,
		0x4080, 0x4090, 0x43c0, 0x4458,
		0x4c00, 0x4c18, 0x4c40, 0x4c54,
		0x4fc0, 0x5010, 0x53c0, 0x5444,
		0x5c00, 0x5c18, 0x5c80, 0x5c90,
		0x5fc0, 0x6000, 0x6400, 0x6428,
		0x6800, 0x6848, 0x684c, 0x6860,
		0x6888, 0x6910, 0x8000
	};

	regs->version = 0;

	memset(p, 0, BNX2_REGDUMP_LEN);

	/* Registers are only accessible while the device is up. */
	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	p += offset;
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = REG_RD(bp, offset);
		offset += 4;
		/* End of this region: jump to the start of the next one. */
		if (offset == reg_boundaries[i + 1]) {
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}
6932
b6016b76
MC
6933static void
6934bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6935{
972ec0d4 6936 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6937
f86e82fb 6938 if (bp->flags & BNX2_FLAG_NO_WOL) {
b6016b76
MC
6939 wol->supported = 0;
6940 wol->wolopts = 0;
6941 }
6942 else {
6943 wol->supported = WAKE_MAGIC;
6944 if (bp->wol)
6945 wol->wolopts = WAKE_MAGIC;
6946 else
6947 wol->wolopts = 0;
6948 }
6949 memset(&wol->sopass, 0, sizeof(wol->sopass));
6950}
6951
6952static int
6953bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6954{
972ec0d4 6955 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6956
6957 if (wol->wolopts & ~WAKE_MAGIC)
6958 return -EINVAL;
6959
6960 if (wol->wolopts & WAKE_MAGIC) {
f86e82fb 6961 if (bp->flags & BNX2_FLAG_NO_WOL)
b6016b76
MC
6962 return -EINVAL;
6963
6964 bp->wol = 1;
6965 }
6966 else {
6967 bp->wol = 0;
6968 }
6969 return 0;
6970}
6971
/* ethtool nway_reset: restart autonegotiation.  For remote-PHY parts this
 * is delegated to the firmware; for local SerDes PHYs the link is first
 * forced down via loopback so the peer notices the renegotiation, and the
 * SerDes autoneg timeout timer is re-armed.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!netif_running(dev))
		return -EAGAIN;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock across the sleep; re-acquire after. */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and kick off autonegotiation. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
7017
7959ea25
ON
7018static u32
7019bnx2_get_link(struct net_device *dev)
7020{
7021 struct bnx2 *bp = netdev_priv(dev);
7022
7023 return bp->link_up;
7024}
7025
b6016b76
MC
7026static int
7027bnx2_get_eeprom_len(struct net_device *dev)
7028{
972ec0d4 7029 struct bnx2 *bp = netdev_priv(dev);
b6016b76 7030
1122db71 7031 if (bp->flash_info == NULL)
b6016b76
MC
7032 return 0;
7033
1122db71 7034 return (int) bp->flash_size;
b6016b76
MC
7035}
7036
7037static int
7038bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7039 u8 *eebuf)
7040{
972ec0d4 7041 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7042 int rc;
7043
9f52b564
MC
7044 if (!netif_running(dev))
7045 return -EAGAIN;
7046
1064e944 7047 /* parameters already validated in ethtool_get_eeprom */
b6016b76
MC
7048
7049 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7050
7051 return rc;
7052}
7053
7054static int
7055bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7056 u8 *eebuf)
7057{
972ec0d4 7058 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7059 int rc;
7060
9f52b564
MC
7061 if (!netif_running(dev))
7062 return -EAGAIN;
7063
1064e944 7064 /* parameters already validated in ethtool_set_eeprom */
b6016b76
MC
7065
7066 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7067
7068 return rc;
7069}
7070
7071static int
7072bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7073{
972ec0d4 7074 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7075
7076 memset(coal, 0, sizeof(struct ethtool_coalesce));
7077
7078 coal->rx_coalesce_usecs = bp->rx_ticks;
7079 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7080 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7081 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7082
7083 coal->tx_coalesce_usecs = bp->tx_ticks;
7084 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7085 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7086 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7087
7088 coal->stats_block_coalesce_usecs = bp->stats_ticks;
7089
7090 return 0;
7091}
7092
/* ethtool set_coalesce: store the requested coalescing parameters,
 * silently clamping each to its hardware field width (tick counts to
 * 10 bits, frame counts to 8 bits), then reinitialize the NIC if running
 * so the new values take effect.
 */
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	/* Broken-stats chips only support off or one-second updates. */
	if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
			bp->stats_ticks = USEC_PER_SEC;
	}
	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	/* Restart the NIC so the coalescing registers are reprogrammed. */
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp, true);
		bnx2_init_nic(bp, 0);
		bnx2_netif_start(bp, true);
	}

	return 0;
}
7141
7142static void
7143bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7144{
972ec0d4 7145 struct bnx2 *bp = netdev_priv(dev);
b6016b76 7146
13daffa2 7147 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
47bf4246 7148 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
b6016b76
MC
7149
7150 ering->rx_pending = bp->rx_ring_size;
47bf4246 7151 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
b6016b76
MC
7152
7153 ering->tx_max_pending = MAX_TX_DESC_CNT;
7154 ering->tx_pending = bp->tx_ring_size;
7155}
7156
/* Resize the RX/TX rings.  If the interface is up this requires a
 * full teardown (chip reset, IRQs/skbs/DMA memory freed) followed by
 * reallocation and re-init with the new sizes.  On any bring-up
 * failure the device is closed and the error returned.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
	if (netif_running(bp->dev)) {
		/* Reset will erase chipset stats; save them */
		bnx2_save_stats(bp);

		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		__bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	/* Record the new sizes; these take effect at (re)init time. */
	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (!rc)
			rc = bnx2_request_irq(bp);

		if (!rc)
			rc = bnx2_init_nic(bp, 0);

		if (rc) {
			/* NAPI must be re-enabled before dev_close()
			 * so the close path can disable it again.
			 */
			bnx2_napi_enable(bp);
			dev_close(bp->dev);
			return rc;
		}
#ifdef BCM_CNIC
		mutex_lock(&bp->cnic_lock);
		/* Let cnic know about the new status block. */
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
			bnx2_setup_cnic_irq_info(bp);
		mutex_unlock(&bp->cnic_lock);
#endif
		bnx2_netif_start(bp, true);
	}
	return 0;
}
7200
5d5d0015
MC
7201static int
7202bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7203{
7204 struct bnx2 *bp = netdev_priv(dev);
7205 int rc;
7206
7207 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7208 (ering->tx_pending > MAX_TX_DESC_CNT) ||
7209 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7210
7211 return -EINVAL;
7212 }
7213 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
7214 return rc;
7215}
7216
b6016b76
MC
7217static void
7218bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7219{
972ec0d4 7220 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7221
7222 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7223 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7224 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7225}
7226
7227static int
7228bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7229{
972ec0d4 7230 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7231
7232 bp->req_flow_ctrl = 0;
7233 if (epause->rx_pause)
7234 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7235 if (epause->tx_pause)
7236 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7237
7238 if (epause->autoneg) {
7239 bp->autoneg |= AUTONEG_FLOW_CTRL;
7240 }
7241 else {
7242 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7243 }
7244
9f52b564
MC
7245 if (netif_running(dev)) {
7246 spin_lock_bh(&bp->phy_lock);
7247 bnx2_setup_phy(bp, bp->phy_port);
7248 spin_unlock_bh(&bp->phy_lock);
7249 }
b6016b76
MC
7250
7251 return 0;
7252}
7253
/* Counter names reported by ethtool -S.  Order must match
 * bnx2_stats_offset_arr and the bnx2_570x_stats_len_arr tables below;
 * all three are indexed together in bnx2_get_ethtool_stats().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_ftq_discards" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};

/* Number of entries above; also sizes the parallel tables below. */
#define BNX2_NUM_STATS (sizeof(bnx2_stats_str_arr)/\
			sizeof(bnx2_stats_str_arr[0]))
b6016b76
MC
/* 32-bit word offset of a field within the hardware statistics block. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Word offset into the statistics block for each counter named in
 * bnx2_stats_str_arr (same order).  64-bit counters point at their
 * _hi word; the _lo word follows at offset + 1.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInFTQDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
7360
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 *
 * Per-counter width in bytes for 5706 A0-A2 / 5708 A0 steppings:
 * 8 = 64-bit counter, 4 = 32-bit counter, 0 = skipped (reported
 * as zero by bnx2_get_ethtool_stats()).
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7371
5b0c76ad
MC
/* Per-counter width in bytes for later steppings; only
 * stat_IfHCInBadOctets (index 1) remains skipped.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7379
b6016b76
MC
#define BNX2_NUM_TESTS 6

/* Names of the self-tests run by bnx2_self_test(), in the order the
 * results appear in its output buffer.  "(offline)" tests require
 * ETH_TEST_FL_OFFLINE and take the NIC down while they run.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
7392
7393static int
b9f2c044 7394bnx2_get_sset_count(struct net_device *dev, int sset)
b6016b76 7395{
b9f2c044
JG
7396 switch (sset) {
7397 case ETH_SS_TEST:
7398 return BNX2_NUM_TESTS;
7399 case ETH_SS_STATS:
7400 return BNX2_NUM_STATS;
7401 default:
7402 return -EOPNOTSUPP;
7403 }
b6016b76
MC
7404}
7405
/* ethtool .self_test handler.  Fills buf[0..5] with per-test results
 * (non-zero = failed) in the order of bnx2_tests_str_arr and sets
 * ETH_TEST_FL_FAILED on any failure.  Offline tests reset the chip
 * into diagnostic mode first and re-init (or shut down) afterwards.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* The chip may be in D3 if the interface is down. */
	bnx2_set_power_state(bp, PCI_D0);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation, or power the chip back
		 * down if the interface was not up to begin with.
		 */
		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp, true);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
	/* Undo the initial power-up if the interface is down. */
	if (!netif_running(bp->dev))
		bnx2_set_power_state(bp, PCI_D3hot);
}
7464
7465static void
7466bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7467{
7468 switch (stringset) {
7469 case ETH_SS_STATS:
7470 memcpy(buf, bnx2_stats_str_arr,
7471 sizeof(bnx2_stats_str_arr));
7472 break;
7473 case ETH_SS_TEST:
7474 memcpy(buf, bnx2_tests_str_arr,
7475 sizeof(bnx2_tests_str_arr));
7476 break;
7477 }
7478}
7479
b6016b76
MC
/* ethtool .get_ethtool_stats: read BNX2_NUM_STATS counters out of the
 * DMA'd hardware statistics block, adding in temp_stats_blk (counters
 * saved across chip resets by bnx2_save_stats()).  The per-chip
 * stats_len_arr gives each counter's width; 0 means the counter is
 * unreadable on this stepping (errata) and is reported as zero.
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
	u8 *stats_len_arr = NULL;

	/* No stats block means the chip was never brought up. */
	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		unsigned long offset;

		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}

		offset = bnx2_stats_offset_arr[i];
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64) *(hw_stats + offset) +
				 *(temp_stats + offset);
			continue;
		}
		/* 8-byte counter: offset points at the _hi word, the
		 * _lo word follows immediately after.
		 */
		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
			 *(hw_stats + offset + 1) +
			 (((u64) *(temp_stats + offset)) << 32) +
			 *(temp_stats + offset + 1);
	}
}
7526
/* ethtool .set_phys_id: blink the port LED so an operator can locate
 * the NIC.  ACTIVE saves the LED mode and powers the chip up; ON/OFF
 * force the LED state; INACTIVE restores the saved mode and powers
 * back down if the interface is not running.
 */
static int
bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
{
	struct bnx2 *bp = netdev_priv(dev);

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		/* Registers are inaccessible in D3; power up first. */
		bnx2_set_power_state(bp, PCI_D0);

		bp->leds_save = REG_RD(bp, BNX2_MISC_CFG);
		REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
		       BNX2_EMAC_LED_1000MB_OVERRIDE |
		       BNX2_EMAC_LED_100MB_OVERRIDE |
		       BNX2_EMAC_LED_10MB_OVERRIDE |
		       BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
		       BNX2_EMAC_LED_TRAFFIC);
		break;

	case ETHTOOL_ID_OFF:
		REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		REG_WR(bp, BNX2_EMAC_LED, 0);
		REG_WR(bp, BNX2_MISC_CFG, bp->leds_save);

		if (!netif_running(dev))
			bnx2_set_power_state(bp, PCI_D3hot);
		break;
	}

	return 0;
}
7564
c8f44aff
MM
7565static netdev_features_t
7566bnx2_fix_features(struct net_device *dev, netdev_features_t features)
4666f87a
MC
7567{
7568 struct bnx2 *bp = netdev_priv(dev);
7569
8d7dfc2b
MM
7570 if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
7571 features |= NETIF_F_HW_VLAN_RX;
7572
7573 return features;
4666f87a
MC
7574}
7575
/* ndo_set_features: apply VLAN offload changes.  Returns 1 when the
 * features were committed here (so the core must not overwrite
 * dev->features again), 0 when the core should apply them itself.
 */
static int
bnx2_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* TSO with VLAN tag won't work with current firmware */
	if (features & NETIF_F_HW_VLAN_TX)
		dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
	else
		dev->vlan_features &= ~NETIF_F_ALL_TSO;

	/* If the RX VLAN-stripping request differs from what the chip
	 * is currently doing, the RX mode must be reprogrammed and the
	 * firmware told about the new keep-VLAN setting.
	 */
	if ((!!(features & NETIF_F_HW_VLAN_RX) !=
	    !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
	    netif_running(dev)) {
		bnx2_netif_stop(bp, false);
		dev->features = features;
		bnx2_set_rx_mode(dev);
		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
		bnx2_netif_start(bp, false);
		return 1;
	}

	return 0;
}
7600
/* ethtool method table registered on the net_device at probe time. */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings = bnx2_get_settings,
	.set_settings = bnx2_set_settings,
	.get_drvinfo = bnx2_get_drvinfo,
	.get_regs_len = bnx2_get_regs_len,
	.get_regs = bnx2_get_regs,
	.get_wol = bnx2_get_wol,
	.set_wol = bnx2_set_wol,
	.nway_reset = bnx2_nway_reset,
	.get_link = bnx2_get_link,
	.get_eeprom_len = bnx2_get_eeprom_len,
	.get_eeprom = bnx2_get_eeprom,
	.set_eeprom = bnx2_set_eeprom,
	.get_coalesce = bnx2_get_coalesce,
	.set_coalesce = bnx2_set_coalesce,
	.get_ringparam = bnx2_get_ringparam,
	.set_ringparam = bnx2_set_ringparam,
	.get_pauseparam = bnx2_get_pauseparam,
	.set_pauseparam = bnx2_set_pauseparam,
	.self_test = bnx2_self_test,
	.get_strings = bnx2_get_strings,
	.set_phys_id = bnx2_set_phys_id,
	.get_ethtool_stats = bnx2_get_ethtool_stats,
	.get_sset_count = bnx2_get_sset_count,
};
7626
/* Called with rtnl_lock */
/* ndo_do_ioctl: MII register access (SIOCGMIIPHY/SIOCGMIIREG/
 * SIOCSMIIREG).  PHY reads/writes are refused when a remote PHY owns
 * the link (REMOTE_PHY_CAP) or when the interface is down, and are
 * serialized with phy_lock.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
7677
7678/* Called with rtnl_lock */
7679static int
7680bnx2_change_mac_addr(struct net_device *dev, void *p)
7681{
7682 struct sockaddr *addr = p;
972ec0d4 7683 struct bnx2 *bp = netdev_priv(dev);
b6016b76 7684
73eef4cd
MC
7685 if (!is_valid_ether_addr(addr->sa_data))
7686 return -EINVAL;
7687
b6016b76
MC
7688 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7689 if (netif_running(dev))
5fcaed01 7690 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
b6016b76
MC
7691
7692 return 0;
7693}
7694
7695/* Called with rtnl_lock */
7696static int
7697bnx2_change_mtu(struct net_device *dev, int new_mtu)
7698{
972ec0d4 7699 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7700
7701 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7702 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7703 return -EINVAL;
7704
7705 dev->mtu = new_mtu;
807540ba 7706 return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size);
b6016b76
MC
7707}
7708
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: run every interrupt handler directly, with its vector
 * disabled, so netconsole/netdump can make progress without normal
 * interrupt delivery.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		struct bnx2_irq *irq = &bp->irq_tbl[i];

		disable_irq(irq->vector);
		irq->handler(irq->vector, &bp->bnx2_napi[i]);
		enable_irq(irq->vector);
	}
}
#endif
7725
253c8b75
MC
/* Determine whether a 5709 port is copper or SerDes from the dual
 * media control register: the bond ID identifies single-media parts
 * directly, otherwise the per-function strap bits are decoded.
 * Sets BNX2_PHY_FLAG_SERDES for fibre ports.
 */
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	/* _C bond: copper only (default, no flag); _S bond: SerDes. */
	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
		return;
	}

	/* Dual-media part: read the strap, honoring a software
	 * override if one is set.
	 */
	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	/* The strap values meaning SerDes differ per PCI function. */
	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	}
}
7763
883e5151
MC
/* Detect the PCI/PCI-X bus mode, clock speed and width of a non-PCIE
 * chip from the chip's own status registers, recording the results in
 * bp->flags (BNX2_FLAG_PCIX / BNX2_FLAG_PCI_32BIT) and
 * bp->bus_speed_mhz.
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		/* Map the detected PCI-X clock range to a nominal MHz. */
		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Conventional PCI: 66 vs 33 MHz per the M66EN pin. */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;

}
7815
76d99061
MC
/* Read the PCI Vital Product Data image out of NVRAM and, if its
 * read-only section carries manufacturer ID "1028" (Dell), copy the
 * V0 vendor-specific field into bp->fw_version as an OEM firmware
 * version prefix.  Failures are silent: bp->fw_version is simply
 * left for the later bootcode-version code to fill in.
 */
static void __devinit
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
	int rc, i, j;
	u8 *data;
	unsigned int block_end, rosize, len;

#define BNX2_VPD_NVRAM_OFFSET	0x300
#define BNX2_VPD_LEN		128
#define BNX2_MAX_VER_SLEN	30

	/* First half of the buffer receives the byte-swapped copy,
	 * second half the raw NVRAM image.
	 */
	data = kmalloc(256, GFP_KERNEL);
	if (!data)
		return;

	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
			     BNX2_VPD_LEN);
	if (rc)
		goto vpd_done;

	/* NVRAM stores each 32-bit word byte-reversed; un-swap into
	 * data[0..BNX2_VPD_LEN-1].
	 */
	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
		data[i] = data[i + BNX2_VPD_LEN + 3];
		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
		data[i + 3] = data[i + BNX2_VPD_LEN];
	}

	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto vpd_done;

	rosize = pci_vpd_lrdt_size(&data[i]);
	i += PCI_VPD_LRDT_TAG_SIZE;
	block_end = i + rosize;

	if (block_end > BNX2_VPD_LEN)
		goto vpd_done;

	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len != 4 ||
	    memcmp(&data[j], "1028", 4))
		goto vpd_done;

	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_VENDOR0);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	/* bp->fw_version must also hold the trailing space plus the
	 * bootcode version appended later, hence BNX2_MAX_VER_SLEN.
	 */
	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
		goto vpd_done;

	memcpy(bp->fw_version, &data[j], len);
	bp->fw_version[len] = ' ';

vpd_done:
	kfree(data);
}
7883
b6016b76
MC
7884static int __devinit
7885bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7886{
7887 struct bnx2 *bp;
7888 unsigned long mem_len;
58fc2ea4 7889 int rc, i, j;
b6016b76 7890 u32 reg;
40453c83 7891 u64 dma_mask, persist_dma_mask;
cd709aa9 7892 int err;
b6016b76 7893
b6016b76 7894 SET_NETDEV_DEV(dev, &pdev->dev);
972ec0d4 7895 bp = netdev_priv(dev);
b6016b76
MC
7896
7897 bp->flags = 0;
7898 bp->phy_flags = 0;
7899
354fcd77
MC
7900 bp->temp_stats_blk =
7901 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
7902
7903 if (bp->temp_stats_blk == NULL) {
7904 rc = -ENOMEM;
7905 goto err_out;
7906 }
7907
b6016b76
MC
7908 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7909 rc = pci_enable_device(pdev);
7910 if (rc) {
3a9c6a49 7911 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
b6016b76
MC
7912 goto err_out;
7913 }
7914
7915 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9b91cf9d 7916 dev_err(&pdev->dev,
3a9c6a49 7917 "Cannot find PCI device base address, aborting\n");
b6016b76
MC
7918 rc = -ENODEV;
7919 goto err_out_disable;
7920 }
7921
7922 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7923 if (rc) {
3a9c6a49 7924 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
b6016b76
MC
7925 goto err_out_disable;
7926 }
7927
7928 pci_set_master(pdev);
7929
7930 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7931 if (bp->pm_cap == 0) {
9b91cf9d 7932 dev_err(&pdev->dev,
3a9c6a49 7933 "Cannot find power management capability, aborting\n");
b6016b76
MC
7934 rc = -EIO;
7935 goto err_out_release;
7936 }
7937
b6016b76
MC
7938 bp->dev = dev;
7939 bp->pdev = pdev;
7940
7941 spin_lock_init(&bp->phy_lock);
1b8227c4 7942 spin_lock_init(&bp->indirect_lock);
c5a88950
MC
7943#ifdef BCM_CNIC
7944 mutex_init(&bp->cnic_lock);
7945#endif
c4028958 7946 INIT_WORK(&bp->reset_task, bnx2_reset_task);
b6016b76
MC
7947
7948 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
4edd473f 7949 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
b6016b76
MC
7950 dev->mem_end = dev->mem_start + mem_len;
7951 dev->irq = pdev->irq;
7952
7953 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7954
7955 if (!bp->regview) {
3a9c6a49 7956 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
b6016b76
MC
7957 rc = -ENOMEM;
7958 goto err_out_release;
7959 }
7960
be7ff1af
MC
7961 bnx2_set_power_state(bp, PCI_D0);
7962
b6016b76
MC
7963 /* Configure byte swap and enable write to the reg_window registers.
7964 * Rely on CPU to do target byte swapping on big endian systems
7965 * The chip's target access swapping will not swap all accesses
7966 */
be7ff1af
MC
7967 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG,
7968 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7969 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
b6016b76
MC
7970
7971 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7972
883e5151 7973 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
e82760e7
JM
7974 if (!pci_is_pcie(pdev)) {
7975 dev_err(&pdev->dev, "Not PCIE, aborting\n");
883e5151
MC
7976 rc = -EIO;
7977 goto err_out_unmap;
7978 }
f86e82fb 7979 bp->flags |= BNX2_FLAG_PCIE;
2dd201d7 7980 if (CHIP_REV(bp) == CHIP_REV_Ax)
f86e82fb 7981 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
c239f279
MC
7982
7983 /* AER (Advanced Error Reporting) hooks */
7984 err = pci_enable_pcie_error_reporting(pdev);
4bb9ebc7
MC
7985 if (!err)
7986 bp->flags |= BNX2_FLAG_AER_ENABLED;
c239f279 7987
883e5151 7988 } else {
59b47d8a
MC
7989 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7990 if (bp->pcix_cap == 0) {
7991 dev_err(&pdev->dev,
3a9c6a49 7992 "Cannot find PCIX capability, aborting\n");
59b47d8a
MC
7993 rc = -EIO;
7994 goto err_out_unmap;
7995 }
61d9e3fa 7996 bp->flags |= BNX2_FLAG_BROKEN_STATS;
59b47d8a
MC
7997 }
7998
b4b36042
MC
7999 if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
8000 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
f86e82fb 8001 bp->flags |= BNX2_FLAG_MSIX_CAP;
b4b36042
MC
8002 }
8003
8e6a72c4
MC
8004 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
8005 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
f86e82fb 8006 bp->flags |= BNX2_FLAG_MSI_CAP;
8e6a72c4
MC
8007 }
8008
40453c83
MC
8009 /* 5708 cannot support DMA addresses > 40-bit. */
8010 if (CHIP_NUM(bp) == CHIP_NUM_5708)
50cf156a 8011 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
40453c83 8012 else
6a35528a 8013 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
40453c83
MC
8014
8015 /* Configure DMA attributes. */
8016 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
8017 dev->features |= NETIF_F_HIGHDMA;
8018 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
8019 if (rc) {
8020 dev_err(&pdev->dev,
3a9c6a49 8021 "pci_set_consistent_dma_mask failed, aborting\n");
40453c83
MC
8022 goto err_out_unmap;
8023 }
284901a9 8024 } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
3a9c6a49 8025 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
40453c83
MC
8026 goto err_out_unmap;
8027 }
8028
f86e82fb 8029 if (!(bp->flags & BNX2_FLAG_PCIE))
883e5151 8030 bnx2_get_pci_speed(bp);
b6016b76
MC
8031
8032 /* 5706A0 may falsely detect SERR and PERR. */
8033 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8034 reg = REG_RD(bp, PCI_COMMAND);
8035 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8036 REG_WR(bp, PCI_COMMAND, reg);
8037 }
8038 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
f86e82fb 8039 !(bp->flags & BNX2_FLAG_PCIX)) {
b6016b76 8040
9b91cf9d 8041 dev_err(&pdev->dev,
3a9c6a49 8042 "5706 A1 can only be used in a PCIX bus, aborting\n");
b6016b76
MC
8043 goto err_out_unmap;
8044 }
8045
8046 bnx2_init_nvram(bp);
8047
2726d6e1 8048 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
e3648b3d
MC
8049
8050 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
24cb230b
MC
8051 BNX2_SHM_HDR_SIGNATURE_SIG) {
8052 u32 off = PCI_FUNC(pdev->devfn) << 2;
8053
2726d6e1 8054 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
24cb230b 8055 } else
e3648b3d
MC
8056 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8057
b6016b76
MC
8058 /* Get the permanent MAC address. First we need to make sure the
8059 * firmware is actually running.
8060 */
2726d6e1 8061 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
b6016b76
MC
8062
8063 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8064 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
3a9c6a49 8065 dev_err(&pdev->dev, "Firmware not running, aborting\n");
b6016b76
MC
8066 rc = -ENODEV;
8067 goto err_out_unmap;
8068 }
8069
76d99061
MC
8070 bnx2_read_vpd_fw_ver(bp);
8071
8072 j = strlen(bp->fw_version);
2726d6e1 8073 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
76d99061 8074 for (i = 0; i < 3 && j < 24; i++) {
58fc2ea4
MC
8075 u8 num, k, skip0;
8076
76d99061
MC
8077 if (i == 0) {
8078 bp->fw_version[j++] = 'b';
8079 bp->fw_version[j++] = 'c';
8080 bp->fw_version[j++] = ' ';
8081 }
58fc2ea4
MC
8082 num = (u8) (reg >> (24 - (i * 8)));
8083 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8084 if (num >= k || !skip0 || k == 1) {
8085 bp->fw_version[j++] = (num / k) + '0';
8086 skip0 = 0;
8087 }
8088 }
8089 if (i != 2)
8090 bp->fw_version[j++] = '.';
8091 }
2726d6e1 8092 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
846f5c62
MC
8093 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8094 bp->wol = 1;
8095
8096 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
f86e82fb 8097 bp->flags |= BNX2_FLAG_ASF_ENABLE;
c2d3db8c
MC
8098
8099 for (i = 0; i < 30; i++) {
2726d6e1 8100 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
c2d3db8c
MC
8101 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8102 break;
8103 msleep(10);
8104 }
8105 }
2726d6e1 8106 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
58fc2ea4
MC
8107 reg &= BNX2_CONDITION_MFW_RUN_MASK;
8108 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8109 reg != BNX2_CONDITION_MFW_RUN_NONE) {
2726d6e1 8110 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
58fc2ea4 8111
76d99061
MC
8112 if (j < 32)
8113 bp->fw_version[j++] = ' ';
8114 for (i = 0; i < 3 && j < 28; i++) {
2726d6e1 8115 reg = bnx2_reg_rd_ind(bp, addr + i * 4);
3aeb7d22 8116 reg = be32_to_cpu(reg);
58fc2ea4
MC
8117 memcpy(&bp->fw_version[j], &reg, 4);
8118 j += 4;
8119 }
8120 }
b6016b76 8121
2726d6e1 8122 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
b6016b76
MC
8123 bp->mac_addr[0] = (u8) (reg >> 8);
8124 bp->mac_addr[1] = (u8) reg;
8125
2726d6e1 8126 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
b6016b76
MC
8127 bp->mac_addr[2] = (u8) (reg >> 24);
8128 bp->mac_addr[3] = (u8) (reg >> 16);
8129 bp->mac_addr[4] = (u8) (reg >> 8);
8130 bp->mac_addr[5] = (u8) reg;
8131
8132 bp->tx_ring_size = MAX_TX_DESC_CNT;
932f3772 8133 bnx2_set_rx_ring_size(bp, 255);
b6016b76 8134
cf7474a6 8135 bp->tx_quick_cons_trip_int = 2;
b6016b76 8136 bp->tx_quick_cons_trip = 20;
cf7474a6 8137 bp->tx_ticks_int = 18;
b6016b76 8138 bp->tx_ticks = 80;
6aa20a22 8139
cf7474a6
MC
8140 bp->rx_quick_cons_trip_int = 2;
8141 bp->rx_quick_cons_trip = 12;
b6016b76
MC
8142 bp->rx_ticks_int = 18;
8143 bp->rx_ticks = 18;
8144
7ea6920e 8145 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
b6016b76 8146
ac392abc 8147 bp->current_interval = BNX2_TIMER_INTERVAL;
b6016b76 8148
5b0c76ad
MC
8149 bp->phy_addr = 1;
8150
b6016b76 8151 /* Disable WOL support if we are running on a SERDES chip. */
253c8b75
MC
8152 if (CHIP_NUM(bp) == CHIP_NUM_5709)
8153 bnx2_get_5709_media(bp);
8154 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
583c28e5 8155 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
bac0dff6 8156
0d8a6571 8157 bp->phy_port = PORT_TP;
583c28e5 8158 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
0d8a6571 8159 bp->phy_port = PORT_FIBRE;
2726d6e1 8160 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
846f5c62 8161 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
f86e82fb 8162 bp->flags |= BNX2_FLAG_NO_WOL;
846f5c62
MC
8163 bp->wol = 0;
8164 }
38ea3686
MC
8165 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
8166 /* Don't do parallel detect on this board because of
8167 * some board problems. The link will not go down
8168 * if we do parallel detect.
8169 */
8170 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8171 pdev->subsystem_device == 0x310c)
8172 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8173 } else {
5b0c76ad 8174 bp->phy_addr = 2;
5b0c76ad 8175 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
583c28e5 8176 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
5b0c76ad 8177 }
261dd5ca
MC
8178 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
8179 CHIP_NUM(bp) == CHIP_NUM_5708)
583c28e5 8180 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
fb0c18bd
MC
8181 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
8182 (CHIP_REV(bp) == CHIP_REV_Ax ||
8183 CHIP_REV(bp) == CHIP_REV_Bx))
583c28e5 8184 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
b6016b76 8185
7c62e83b
MC
8186 bnx2_init_fw_cap(bp);
8187
16088272
MC
8188 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
8189 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
5ec6d7bf
MC
8190 (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
8191 !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
f86e82fb 8192 bp->flags |= BNX2_FLAG_NO_WOL;
846f5c62
MC
8193 bp->wol = 0;
8194 }
dda1e390 8195
b6016b76
MC
8196 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8197 bp->tx_quick_cons_trip_int =
8198 bp->tx_quick_cons_trip;
8199 bp->tx_ticks_int = bp->tx_ticks;
8200 bp->rx_quick_cons_trip_int =
8201 bp->rx_quick_cons_trip;
8202 bp->rx_ticks_int = bp->rx_ticks;
8203 bp->comp_prod_trip_int = bp->comp_prod_trip;
8204 bp->com_ticks_int = bp->com_ticks;
8205 bp->cmd_ticks_int = bp->cmd_ticks;
8206 }
8207
f9317a40
MC
8208 /* Disable MSI on 5706 if AMD 8132 bridge is found.
8209 *
8210 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
8211 * with byte enables disabled on the unused 32-bit word. This is legal
8212 * but causes problems on the AMD 8132 which will eventually stop
8213 * responding after a while.
8214 *
8215 * AMD believes this incompatibility is unique to the 5706, and
88187dfa 8216 * prefers to locally disable MSI rather than globally disabling it.
f9317a40
MC
8217 */
8218 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
8219 struct pci_dev *amd_8132 = NULL;
8220
8221 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8222 PCI_DEVICE_ID_AMD_8132_BRIDGE,
8223 amd_8132))) {
f9317a40 8224
44c10138
AK
8225 if (amd_8132->revision >= 0x10 &&
8226 amd_8132->revision <= 0x13) {
f9317a40
MC
8227 disable_msi = 1;
8228 pci_dev_put(amd_8132);
8229 break;
8230 }
8231 }
8232 }
8233
deaf391b 8234 bnx2_set_default_link(bp);
b6016b76
MC
8235 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8236
cd339a0e 8237 init_timer(&bp->timer);
ac392abc 8238 bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
cd339a0e
MC
8239 bp->timer.data = (unsigned long) bp;
8240 bp->timer.function = bnx2_timer;
8241
7625eb2f 8242#ifdef BCM_CNIC
41c2178a
MC
8243 if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
8244 bp->cnic_eth_dev.max_iscsi_conn =
8245 (bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8246 BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
7625eb2f 8247#endif
c239f279
MC
8248 pci_save_state(pdev);
8249
b6016b76
MC
8250 return 0;
8251
8252err_out_unmap:
4bb9ebc7 8253 if (bp->flags & BNX2_FLAG_AER_ENABLED) {
c239f279 8254 pci_disable_pcie_error_reporting(pdev);
4bb9ebc7
MC
8255 bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8256 }
c239f279 8257
b6016b76
MC
8258 if (bp->regview) {
8259 iounmap(bp->regview);
73eef4cd 8260 bp->regview = NULL;
b6016b76
MC
8261 }
8262
8263err_out_release:
8264 pci_release_regions(pdev);
8265
8266err_out_disable:
8267 pci_disable_device(pdev);
8268 pci_set_drvdata(pdev, NULL);
8269
8270err_out:
8271 return rc;
8272}
8273
883e5151
MC
8274static char * __devinit
8275bnx2_bus_string(struct bnx2 *bp, char *str)
8276{
8277 char *s = str;
8278
f86e82fb 8279 if (bp->flags & BNX2_FLAG_PCIE) {
883e5151
MC
8280 s += sprintf(s, "PCI Express");
8281 } else {
8282 s += sprintf(s, "PCI");
f86e82fb 8283 if (bp->flags & BNX2_FLAG_PCIX)
883e5151 8284 s += sprintf(s, "-X");
f86e82fb 8285 if (bp->flags & BNX2_FLAG_PCI_32BIT)
883e5151
MC
8286 s += sprintf(s, " 32-bit");
8287 else
8288 s += sprintf(s, " 64-bit");
8289 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8290 }
8291 return str;
8292}
8293
f048fa9c
MC
8294static void
8295bnx2_del_napi(struct bnx2 *bp)
8296{
8297 int i;
8298
8299 for (i = 0; i < bp->irq_nvecs; i++)
8300 netif_napi_del(&bp->bnx2_napi[i].napi);
8301}
8302
8303static void
35efa7c1
MC
8304bnx2_init_napi(struct bnx2 *bp)
8305{
b4b36042 8306 int i;
35efa7c1 8307
4327ba43 8308 for (i = 0; i < bp->irq_nvecs; i++) {
35e9010b
MC
8309 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8310 int (*poll)(struct napi_struct *, int);
8311
8312 if (i == 0)
8313 poll = bnx2_poll;
8314 else
f0ea2e63 8315 poll = bnx2_poll_msix;
35e9010b
MC
8316
8317 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
b4b36042
MC
8318 bnapi->bp = bp;
8319 }
35efa7c1
MC
8320}
8321
/* net_device callback table wired up in bnx2_init_one().  All handlers
 * are defined earlier in this file except eth_validate_addr (core).
 */
static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats64	= bnx2_get_stats64,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_fix_features	= bnx2_fix_features,
	.ndo_set_features	= bnx2_set_features,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	/* Only used by netconsole/netpoll when polling is compiled in. */
	.ndo_poll_controller	= poll_bnx2,
#endif
};
8339
/* PCI probe callback: allocate the netdev, initialize the board via
 * bnx2_init_board(), set up netdev ops/features, and register the
 * interface.  Returns 0 on success or a negative errno.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];

	/* Print the driver banner only on the first probed device. */
	if (version_printed++ == 0)
		pr_info("%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		/* bnx2_init_board() cleans up its own partial state. */
		free_netdev(dev);
		return rc;
	}

	dev->netdev_ops = &bnx2_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);

	pci_set_drvdata(pdev, dev);

	/* MAC address was read from shmem in bnx2_init_board(). */
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);

	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
		NETIF_F_TSO | NETIF_F_TSO_ECN |
		NETIF_F_RXHASH | NETIF_F_RXCSUM;

	/* Only the 5709 supports IPv6 checksum offload and TSO6. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;

	/* VLAN features mirror hw_features before the VLAN bits are added
	 * so offloads apply to VLAN-tagged traffic too.
	 */
	dev->vlan_features = dev->hw_features;
	dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->features |= dev->hw_features;
	dev->priv_flags |= IFF_UNICAST_FLT;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto error;
	}

	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n",
		    board_info[ent->driver_data].name,
		    ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		    ((CHIP_ID(bp) & 0x0ff0) >> 4),
		    bnx2_bus_string(bp, str),
		    dev->base_addr,
		    bp->pdev->irq, dev->dev_addr);

	return 0;

error:
	/* NOTE(review): unlike bnx2_remove_one(), this path does not
	 * disable AER reporting or release firmware acquired during
	 * bnx2_init_board() — confirm whether that is intentional.
	 */
	if (bp->regview)
		iounmap(bp->regview);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
	return rc;
}
8411
/* PCI remove callback: undo bnx2_init_one() in reverse order. */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Stop new traffic first, then quiesce deferred work. */
	unregister_netdev(dev);

	del_timer_sync(&bp->timer);
	cancel_work_sync(&bp->reset_task);

	if (bp->regview)
		iounmap(bp->regview);

	kfree(bp->temp_stats_blk);

	/* AER reporting is only active when probe set this flag. */
	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
		pci_disable_pcie_error_reporting(pdev);
		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
	}

	bnx2_release_firmware(bp);

	/* free_netdev() also frees bp; bp must not be touched below. */
	free_netdev(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
8441
/* Legacy PCI suspend hook: save config space, quiesce the interface if
 * it is up, and drop the chip into the requested low-power state.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	/* Make sure no reset work races with the shutdown below. */
	cancel_work_sync(&bp->reset_task);
	bnx2_netif_stop(bp, true);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
8465
/* Legacy PCI resume hook: restore config space and, if the interface
 * was running at suspend, bring the chip and data path back up.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	/* NOTE(review): bnx2_init_nic() can fail but its return value is
	 * ignored here — verify whether a failure should be propagated.
	 */
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp, true);
	return 0;
}
8482
/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	/* The link is dead for good; recovery is impossible. */
	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev)) {
		bnx2_netif_stop(bp, true);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
8517
/**
 * bnx2_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	pci_ers_result_t result;
	int err;

	rtnl_lock();
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		/* Restore the state saved at probe, then re-save it so a
		 * later restore (e.g. another reset) starts from here.
		 */
		pci_restore_state(pdev);
		pci_save_state(pdev);

		if (netif_running(dev)) {
			bnx2_set_power_state(bp, PCI_D0);
			bnx2_init_nic(bp, 1);
		}
		result = PCI_ERS_RESULT_RECOVERED;
	}
	rtnl_unlock();

	/* Without AER there is no uncorrectable-error status to clear. */
	if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
		return result;

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
			 err); /* non-fatal, continue */
	}

	return result;
}
8561
8562/**
8563 * bnx2_io_resume - called when traffic can start flowing again.
8564 * @pdev: Pointer to PCI device
8565 *
8566 * This callback is called when the error recovery driver tells us that
8567 * its OK to resume normal operation.
8568 */
8569static void bnx2_io_resume(struct pci_dev *pdev)
8570{
8571 struct net_device *dev = pci_get_drvdata(pdev);
8572 struct bnx2 *bp = netdev_priv(dev);
8573
8574 rtnl_lock();
8575 if (netif_running(dev))
212f9934 8576 bnx2_netif_start(bp, true);
6ff2da49
WX
8577
8578 netif_device_attach(dev);
8579 rtnl_unlock();
8580}
8581
/* AER/EEH recovery callbacks, referenced from bnx2_pci_driver. */
static struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};
8587
/* PCI driver descriptor: matches bnx2_pci_tbl IDs and wires up the
 * probe/remove, legacy power-management, and error-recovery entry points.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
	.err_handler	= &bnx2_err_handler,
};
8597
/* Module entry point: register with the PCI core; bnx2_init_one() is
 * then invoked once per matching device.
 */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}
8602
/* Module exit point: unregister the driver, which triggers
 * bnx2_remove_one() for every bound device.
 */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);
8610
8611
8612