/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION	"1.52.1-8"
#define DRV_MODULE_RELDATE	"2010/04/01"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x-e1h-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
				" (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
				"(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

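/* indirect read: program the GRC address window in PCI config space, read
 * the data register, then point the window back at the vendor-ID offset,
 * as the write side does
 */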
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

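/* one "go" doorbell register per DMAE command slot (0..15) */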
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

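/* Copy len32 dwords from host memory at dma_addr to GRC address dst_addr.
 * The command is posted on channel INIT_DMAE_C and completion is detected
 * by polling wb_comp for DMAE_COMP_VAL (up to "cnt" iterations); before
 * DMAE is ready the transfer falls back to indirect register writes.
 */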
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

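/* Write an arbitrarily long buffer with multiple DMAE transactions of at
 * most DMAE_LEN32_WR_MAX dwords each.  Note the units: len and dmae_wr_max
 * count 32-bit words while the addresses advance in bytes, hence the
 * "* 4" when updating offset.
 */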
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

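/* Scan the assert lists of the four storm processors (X/T/C/U) and print
 * every entry whose first dword holds a valid assert opcode; returns the
 * number of asserts found.
 */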
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

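/* Dump the MCP scratchpad trace: "mark" is the saved position in the
 * cyclic trace buffer, so print from mark to the end of the region first
 * and then wrap around from the buffer start back up to mark.
 */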
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 addr;
	u32 mark, offset;
	__be32 data[9];
	int word;

	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}

	addr = bp->common.shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(0x%x)  def_u_idx(0x%x)  def_x_idx(0x%x)"
		  "  def_t_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
		  "  spq_prod_idx(0x%x)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
			  "  *rx_bd_cons_sb(0x%x)  rx_comp_prod(0x%x)"
			  "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
			  "  fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
			  "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
			  "  *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("     fp_c_idx(0x%x)  *sb_c_idx(0x%x)"
			  "  tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

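/* Configure the HC (host coalescing) block for the interrupt mode in use
 * (MSI-X, MSI or INTx) on this port, then program the leading/trailing
 * edge attention registers on E1H chips.
 */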
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

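/* Disable interrupt delivery and wait for all ISRs and the slowpath task
 * to finish, so callers may safely tear down resources the handlers use.
 */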
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

/* Return true if the lock was acquired successfully */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		/* -EINVAL from a bool function would read as "true";
		 * an out-of-range resource must fail the trylock */
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
	return false;
}

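/* Acknowledge status block "index" of "storm" to the IGU through the HC
 * command register; "op" selects the interrupt mode to leave behind
 * (e.g. IGU_INT_DISABLE) and "update" controls whether the SB index is
 * updated.
 */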
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;

	barrier(); /* status block is written to by the chip */
	fp->fp_c_idx = fpsb->c_status_block.status_block_index;
	fp->fp_u_idx = fpsb->u_status_block.status_block_index;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

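/* A transmitted packet occupies a chain of "nbd" descriptors: a start BD,
 * a parse BD, an optional TSO split-header BD and one BD per fragment;
 * only the start BD and the fragment BDs carry DMA mappings to unmap.
 */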
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

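/* Free TX BDs = ring size - (prod - cons) - NUM_TX_RINGS: the "next page"
 * BD at the end of each ring page can never hold data, so one BD per
 * ring page is subtracted from the usable space.
 */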
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 hw_cons;

	/* Tell compiler that status block fields can change */
	barrier();
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	return hw_cons != fp->tx_pkt_cons;
}

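/* Reclaim completed TX packets: walk from the software consumer up to the
 * hardware consumer reported in the status block, free each packet, and
 * wake the netdev TX queue if it was stopped and enough BDs are free.
 */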
static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could have happened if rx_action()
		 * gets suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

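/* Slowpath (ramrod) completion handler: advance the per-fastpath or
 * global state machine according to which command completed in which
 * state, and release the slowpath queue credit.
 */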
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp[%d] state is %x\n",
				  command, fp->index, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

#ifdef BCM_CNIC
	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		break;
#endif

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	dma_sync_single_for_device(&bp->pdev->dev,
				   dma_unmap_addr(cons_rx_buf, mapping),
				   RX_COPY_THRESH, DMA_FROM_DEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	dma_unmap_addr_set(prod_rx_buf, mapping,
			   dma_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

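/* After a TPA aggregation completes, advance the SGE producer: pages
 * consumed by the FW are marked in sge_mask, and the producer moves over
 * every fully-consumed mask element.
 */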
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

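/* TPA start: the FW begins aggregating into the buffer at the consumer
 * index.  Map the spare skb from the per-queue tpa_pool into the producer
 * BD and park the aggregating buffer in the pool until TPA stop.
 */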
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, DMA_FROM_DEVICE);
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

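/* Attach the SGE pages of a finished aggregation to the skb as page
 * fragments, replacing each consumed page with a freshly allocated one;
 * if a replacement allocation fails the whole packet is dropped.
 */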
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

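/* TPA stop: the aggregation is complete.  Fix up the linear part of the
 * parked skb, attach the SGE fragments, recompute the IP checksum and
 * hand the packet to the stack; the freshly allocated skb takes the pool
 * slot for the next aggregation.
 */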
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, DMA_FROM_DEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_gro_receive(&fp->napi, bp->vlgrp,
						 le16_to_cpu(cqe->fast_path_cqe.
							     vlan_tag), skb);
			else
#endif
				napi_gro_receive(&fp->napi, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

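/* Main RX poll loop (called from NAPI): process up to "budget" CQEs,
 * dispatching slowpath completions to bnx2x_sp_event(), handling TPA
 * start/stop, and passing regular packets to the stack, then publish the
 * new producers to the FW.
 */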
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			prefetch(skb);
			prefetch((u8 *)skb + 256);
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			dma_sync_single_for_device(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
						   pad + RX_COPY_THRESH,
						   DMA_FROM_DEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  flags %x  rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR  packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else
			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_gro_receive(&fp->napi, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
		else
#endif
			napi_gro_receive(&fp->napi, skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

1782static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1783{
1784 struct bnx2x_fastpath *fp = fp_cookie;
1785 struct bnx2x *bp = fp->bp;
a2fbb9ea 1786
da5a662a
VZ
1787 /* Return here if interrupt is disabled */
1788 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1789 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1790 return IRQ_HANDLED;
1791 }
1792
34f80b04 1793 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
ca00392c 1794 fp->index, fp->sb_id);
0626b899 1795 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
1796
1797#ifdef BNX2X_STOP_ON_ERROR
1798 if (unlikely(bp->panic))
1799 return IRQ_HANDLED;
1800#endif
ca00392c 1801
54b9ddaa
VZ
1802 /* Handle Rx and Tx according to MSI-X vector */
1803 prefetch(fp->rx_cons_sb);
1804 prefetch(fp->tx_cons_sb);
1805 prefetch(&fp->status_blk->u_status_block.status_block_index);
1806 prefetch(&fp->status_blk->c_status_block.status_block_index);
1807 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
34f80b04 1808
a2fbb9ea
ET
1809 return IRQ_HANDLED;
1810}
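
/*
 * A minimal registration sketch for the MSI-X handler above, under the
 * assumption that bp->msix_table[] keeps the slow-path vector at entry 0
 * and the fast-path vectors from entry 1 on, as this driver does
 * elsewhere; the helper name is illustrative, not part of this file:
 */
static int bnx2x_req_fp_irqs_sketch(struct bnx2x *bp)
{
	int i, rc;

	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		/* bind fast-path vector i+1 to queue i's fastpath cookie */
		rc = request_irq(bp->msix_table[i + 1].vector,
				 bnx2x_msix_fp_int, 0,
				 bp->dev->name, &bp->fp[i]);
		if (rc)
			return rc; /* caller must free already-requested irqs */
	}
	return 0;
}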
1811
1812static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1813{
555f6c78 1814 struct bnx2x *bp = netdev_priv(dev_instance);
a2fbb9ea 1815 u16 status = bnx2x_ack_int(bp);
34f80b04 1816 u16 mask;
ca00392c 1817 int i;
a2fbb9ea 1818
34f80b04 1819 /* Return here if interrupt is shared and it's not for us */
a2fbb9ea
ET
1820 if (unlikely(status == 0)) {
1821 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1822 return IRQ_NONE;
1823 }
f5372251 1824 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
a2fbb9ea 1825
34f80b04 1826 /* Return here if interrupt is disabled */
a2fbb9ea
ET
1827 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1828 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1829 return IRQ_HANDLED;
1830 }
1831
3196a88a
EG
1832#ifdef BNX2X_STOP_ON_ERROR
1833 if (unlikely(bp->panic))
1834 return IRQ_HANDLED;
1835#endif
1836
ca00392c
EG
1837 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1838 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 1839
ca00392c
EG
1840 mask = 0x2 << fp->sb_id;
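		/* status bit 0 belongs to the default (slow-path) status
		 * block, handled further down; fast-path SB n is reported
		 * at bit (n + 1), hence the 0x2 << sb_id mask above */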
1841 if (status & mask) {
54b9ddaa
VZ
1842 /* Handle Rx and Tx according to SB id */
1843 prefetch(fp->rx_cons_sb);
1844 prefetch(&fp->status_blk->u_status_block.
1845 status_block_index);
1846 prefetch(fp->tx_cons_sb);
1847 prefetch(&fp->status_blk->c_status_block.
1848 status_block_index);
1849 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
ca00392c
EG
1850 status &= ~mask;
1851 }
a2fbb9ea
ET
1852 }
1853
993ac7b5
MC
1854#ifdef BCM_CNIC
1855 mask = 0x2 << CNIC_SB_ID(bp);
1856 if (status & (mask | 0x1)) {
1857 struct cnic_ops *c_ops = NULL;
1858
1859 rcu_read_lock();
1860 c_ops = rcu_dereference(bp->cnic_ops);
1861 if (c_ops)
1862 c_ops->cnic_handler(bp->cnic_data, NULL);
1863 rcu_read_unlock();
1864
1865 status &= ~mask;
1866 }
1867#endif
a2fbb9ea 1868
34f80b04 1869 if (unlikely(status & 0x1)) {
1cf167f2 1870 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
1871
1872 status &= ~0x1;
1873 if (!status)
1874 return IRQ_HANDLED;
1875 }
1876
cdaa7cb8
VZ
1877 if (unlikely(status))
1878 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
34f80b04 1879 status);
a2fbb9ea 1880
c18487ee 1881 return IRQ_HANDLED;
a2fbb9ea
ET
1882}
1883
c18487ee 1884/* end of fast path */
a2fbb9ea 1885
bb2a0f7a 1886static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1887
c18487ee
YR
1888/* Link */
1889
1890/*
1891 * General service functions
1892 */
a2fbb9ea 1893
4a37fb66 1894static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1895{
1896 u32 lock_status;
1897 u32 resource_bit = (1 << resource);
4a37fb66
YG
1898 int func = BP_FUNC(bp);
1899 u32 hw_lock_control_reg;
c18487ee 1900 int cnt;
a2fbb9ea 1901
c18487ee
YR
1902 /* Validating that the resource is within range */
1903 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1904 DP(NETIF_MSG_HW,
1905 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1906 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1907 return -EINVAL;
1908 }
a2fbb9ea 1909
4a37fb66
YG
1910 if (func <= 5) {
1911 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1912 } else {
1913 hw_lock_control_reg =
1914 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1915 }
1916
c18487ee 1917 /* Validating that the resource is not already taken */
4a37fb66 1918 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1919 if (lock_status & resource_bit) {
1920 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1921 lock_status, resource_bit);
1922 return -EEXIST;
1923 }
a2fbb9ea 1924
46230476
EG
 1925	/* Try for 5 seconds, polling every 5ms */
1926 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1927 /* Try to acquire the lock */
4a37fb66
YG
1928 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1929 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1930 if (lock_status & resource_bit)
1931 return 0;
a2fbb9ea 1932
c18487ee 1933 msleep(5);
a2fbb9ea 1934 }
c18487ee
YR
1935 DP(NETIF_MSG_HW, "Timeout\n");
1936 return -EAGAIN;
1937}
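
/*
 * Note on the register pair used above: writing resource_bit at offset
 * hw_lock_control_reg + 4 is the "set" request (the read-back shows the
 * bit once the lock is owned), while bnx2x_release_hw_lock() below
 * writes the same bit to hw_lock_control_reg itself to drop the lock.
 */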
a2fbb9ea 1938
4a37fb66 1939static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1940{
1941 u32 lock_status;
1942 u32 resource_bit = (1 << resource);
4a37fb66
YG
1943 int func = BP_FUNC(bp);
1944 u32 hw_lock_control_reg;
a2fbb9ea 1945
72fd0718
VZ
1946 DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1947
c18487ee
YR
1948 /* Validating that the resource is within range */
1949 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1950 DP(NETIF_MSG_HW,
1951 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1952 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1953 return -EINVAL;
1954 }
1955
4a37fb66
YG
1956 if (func <= 5) {
1957 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1958 } else {
1959 hw_lock_control_reg =
1960 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1961 }
1962
c18487ee 1963 /* Validating that the resource is currently taken */
4a37fb66 1964 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1965 if (!(lock_status & resource_bit)) {
1966 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1967 lock_status, resource_bit);
1968 return -EFAULT;
a2fbb9ea
ET
1969 }
1970
4a37fb66 1971 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1972 return 0;
1973}
1974
1975/* HW Lock for shared dual port PHYs */
4a37fb66 1976static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee 1977{
34f80b04 1978 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1979
46c6a674
EG
1980 if (bp->port.need_hw_lock)
1981 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
c18487ee 1982}
a2fbb9ea 1983
4a37fb66 1984static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee 1985{
46c6a674
EG
1986 if (bp->port.need_hw_lock)
1987 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
a2fbb9ea 1988
34f80b04 1989 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1990}
a2fbb9ea 1991
4acac6a5
EG
1992int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1993{
1994 /* The GPIO should be swapped if swap register is set and active */
1995 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1996 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1997 int gpio_shift = gpio_num +
1998 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1999 u32 gpio_mask = (1 << gpio_shift);
2000 u32 gpio_reg;
2001 int value;
2002
2003 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2004 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2005 return -EINVAL;
2006 }
2007
2008 /* read GPIO value */
2009 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2010
2011 /* get the requested pin value */
2012 if ((gpio_reg & gpio_mask) == gpio_mask)
2013 value = 1;
2014 else
2015 value = 0;
2016
2017 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
2018
2019 return value;
2020}
2021
17de50b7 2022int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
2023{
2024 /* The GPIO should be swapped if swap register is set and active */
2025 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 2026 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
2027 int gpio_shift = gpio_num +
2028 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2029 u32 gpio_mask = (1 << gpio_shift);
2030 u32 gpio_reg;
a2fbb9ea 2031
c18487ee
YR
2032 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2033 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2034 return -EINVAL;
2035 }
a2fbb9ea 2036
4a37fb66 2037 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
2038 /* read GPIO and mask except the float bits */
2039 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 2040
c18487ee
YR
2041 switch (mode) {
2042 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2043 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
2044 gpio_num, gpio_shift);
2045 /* clear FLOAT and set CLR */
2046 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2047 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2048 break;
a2fbb9ea 2049
c18487ee
YR
2050 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2051 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
2052 gpio_num, gpio_shift);
2053 /* clear FLOAT and set SET */
2054 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2055 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2056 break;
a2fbb9ea 2057
17de50b7 2058 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
2059 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2060 gpio_num, gpio_shift);
2061 /* set FLOAT */
2062 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2063 break;
a2fbb9ea 2064
c18487ee
YR
2065 default:
2066 break;
a2fbb9ea
ET
2067 }
2068
c18487ee 2069 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 2070 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 2071
c18487ee 2072 return 0;
a2fbb9ea
ET
2073}
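
/*
 * Typical call, as used by the fan-failure handler later in this file:
 * drive the PHY-reset GPIO low on the (possibly swapped) port:
 *
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *		       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 */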
2074
4acac6a5
EG
2075int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2076{
2077 /* The GPIO should be swapped if swap register is set and active */
2078 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2079 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2080 int gpio_shift = gpio_num +
2081 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2082 u32 gpio_mask = (1 << gpio_shift);
2083 u32 gpio_reg;
2084
2085 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2086 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2087 return -EINVAL;
2088 }
2089
2090 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2091 /* read GPIO int */
2092 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2093
2094 switch (mode) {
2095 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2096 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2097 "output low\n", gpio_num, gpio_shift);
2098 /* clear SET and set CLR */
2099 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2100 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2101 break;
2102
2103 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2104 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2105 "output high\n", gpio_num, gpio_shift);
2106 /* clear CLR and set SET */
2107 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2108 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2109 break;
2110
2111 default:
2112 break;
2113 }
2114
2115 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2116 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2117
2118 return 0;
2119}
2120
c18487ee 2121static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 2122{
c18487ee
YR
2123 u32 spio_mask = (1 << spio_num);
2124 u32 spio_reg;
a2fbb9ea 2125
c18487ee
YR
2126 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2127 (spio_num > MISC_REGISTERS_SPIO_7)) {
2128 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2129 return -EINVAL;
a2fbb9ea
ET
2130 }
2131
4a37fb66 2132 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
2133 /* read SPIO and mask except the float bits */
2134 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 2135
c18487ee 2136 switch (mode) {
6378c025 2137 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
2138 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2139 /* clear FLOAT and set CLR */
2140 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2141 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2142 break;
a2fbb9ea 2143
6378c025 2144 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
2145 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2146 /* clear FLOAT and set SET */
2147 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2148 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2149 break;
a2fbb9ea 2150
c18487ee
YR
2151 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2152 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2153 /* set FLOAT */
2154 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2155 break;
a2fbb9ea 2156
c18487ee
YR
2157 default:
2158 break;
a2fbb9ea
ET
2159 }
2160
c18487ee 2161 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 2162 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 2163
a2fbb9ea
ET
2164 return 0;
2165}
2166
c18487ee 2167static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 2168{
ad33ea3a
EG
2169 switch (bp->link_vars.ieee_fc &
2170 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 2171 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 2172 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2173 ADVERTISED_Pause);
2174 break;
356e2385 2175
c18487ee 2176 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 2177 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
2178 ADVERTISED_Pause);
2179 break;
356e2385 2180
c18487ee 2181 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 2182 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee 2183 break;
356e2385 2184
c18487ee 2185 default:
34f80b04 2186 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2187 ADVERTISED_Pause);
2188 break;
2189 }
2190}
f1410647 2191
c18487ee
YR
2192static void bnx2x_link_report(struct bnx2x *bp)
2193{
f34d28ea 2194 if (bp->flags & MF_FUNC_DIS) {
2691d51d 2195 netif_carrier_off(bp->dev);
7995c64e 2196 netdev_err(bp->dev, "NIC Link is Down\n");
2691d51d
EG
2197 return;
2198 }
2199
c18487ee 2200 if (bp->link_vars.link_up) {
35c5f8fe
EG
2201 u16 line_speed;
2202
c18487ee
YR
2203 if (bp->state == BNX2X_STATE_OPEN)
2204 netif_carrier_on(bp->dev);
7995c64e 2205 netdev_info(bp->dev, "NIC Link is Up, ");
f1410647 2206
35c5f8fe
EG
2207 line_speed = bp->link_vars.line_speed;
2208 if (IS_E1HMF(bp)) {
2209 u16 vn_max_rate;
2210
2211 vn_max_rate =
2212 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2213 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2214 if (vn_max_rate < line_speed)
2215 line_speed = vn_max_rate;
2216 }
7995c64e 2217 pr_cont("%d Mbps ", line_speed);
f1410647 2218
c18487ee 2219 if (bp->link_vars.duplex == DUPLEX_FULL)
7995c64e 2220 pr_cont("full duplex");
c18487ee 2221 else
7995c64e 2222 pr_cont("half duplex");
f1410647 2223
c0700f90
DM
2224 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2225 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
7995c64e 2226 pr_cont(", receive ");
356e2385
EG
2227 if (bp->link_vars.flow_ctrl &
2228 BNX2X_FLOW_CTRL_TX)
7995c64e 2229 pr_cont("& transmit ");
c18487ee 2230 } else {
7995c64e 2231 pr_cont(", transmit ");
c18487ee 2232 }
7995c64e 2233 pr_cont("flow control ON");
c18487ee 2234 }
7995c64e 2235 pr_cont("\n");
f1410647 2236
c18487ee
YR
2237 } else { /* link_down */
2238 netif_carrier_off(bp->dev);
7995c64e 2239 netdev_err(bp->dev, "NIC Link is Down\n");
f1410647 2240 }
c18487ee
YR
2241}
2242
b5bf9068 2243static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 2244{
19680c48
EG
2245 if (!BP_NOMCP(bp)) {
2246 u8 rc;
a2fbb9ea 2247
19680c48 2248 /* Initialize link parameters structure variables */
8c99e7b0
YR
2249 /* It is recommended to turn off RX FC for jumbo frames
2250 for better performance */
0c593270 2251 if (bp->dev->mtu > 5000)
c0700f90 2252 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 2253 else
c0700f90 2254 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 2255
4a37fb66 2256 bnx2x_acquire_phy_lock(bp);
b5bf9068
EG
2257
2258 if (load_mode == LOAD_DIAG)
2259 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2260
19680c48 2261 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 2262
4a37fb66 2263 bnx2x_release_phy_lock(bp);
a2fbb9ea 2264
3c96c68b
EG
2265 bnx2x_calc_fc_adv(bp);
2266
b5bf9068
EG
2267 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2268 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 2269 bnx2x_link_report(bp);
b5bf9068 2270 }
34f80b04 2271
19680c48
EG
2272 return rc;
2273 }
f5372251 2274 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
19680c48 2275 return -EINVAL;
a2fbb9ea
ET
2276}
2277
c18487ee 2278static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2279{
19680c48 2280 if (!BP_NOMCP(bp)) {
4a37fb66 2281 bnx2x_acquire_phy_lock(bp);
19680c48 2282 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2283 bnx2x_release_phy_lock(bp);
a2fbb9ea 2284
19680c48
EG
2285 bnx2x_calc_fc_adv(bp);
2286 } else
f5372251 2287 BNX2X_ERR("Bootcode is missing - can not set link\n");
c18487ee 2288}
a2fbb9ea 2289
c18487ee
YR
2290static void bnx2x__link_reset(struct bnx2x *bp)
2291{
19680c48 2292 if (!BP_NOMCP(bp)) {
4a37fb66 2293 bnx2x_acquire_phy_lock(bp);
589abe3a 2294 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 2295 bnx2x_release_phy_lock(bp);
19680c48 2296 } else
f5372251 2297 BNX2X_ERR("Bootcode is missing - can not reset link\n");
c18487ee 2298}
a2fbb9ea 2299
c18487ee
YR
2300static u8 bnx2x_link_test(struct bnx2x *bp)
2301{
2145a920 2302 u8 rc = 0;
a2fbb9ea 2303
2145a920
VZ
2304 if (!BP_NOMCP(bp)) {
2305 bnx2x_acquire_phy_lock(bp);
2306 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2307 bnx2x_release_phy_lock(bp);
2308 } else
2309 BNX2X_ERR("Bootcode is missing - can not test link\n");
a2fbb9ea 2310
c18487ee
YR
2311 return rc;
2312}
a2fbb9ea 2313
8a1c38d1 2314static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 2315{
8a1c38d1
EG
2316 u32 r_param = bp->link_vars.line_speed / 8;
2317 u32 fair_periodic_timeout_usec;
2318 u32 t_fair;
34f80b04 2319
8a1c38d1
EG
2320 memset(&(bp->cmng.rs_vars), 0,
2321 sizeof(struct rate_shaping_vars_per_port));
2322 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 2323
8a1c38d1
EG
2324 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2325 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 2326
8a1c38d1
EG
 2327	/* this is the threshold below which no timer arming will occur.
 2328	   The 1.25 coefficient makes the threshold a little bigger
 2329	   than the real time, to compensate for timer inaccuracy */
2330 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
2331 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2332
8a1c38d1
EG
2333 /* resolution of fairness timer */
2334 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2335 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2336 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 2337
8a1c38d1
EG
2338 /* this is the threshold below which we won't arm the timer anymore */
2339 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 2340
8a1c38d1
EG
 2341	/* we multiply by 1e3/8 to get bytes/msec.
 2342	   We don't want the credits to exceed a credit of
 2343	   t_fair*FAIR_MEM (the algorithm resolution) */
2344 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2345 /* since each tick is 4 usec */
2346 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
2347}
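
/*
 * Worked example of the arithmetic above, assuming a 10G link and the
 * 100 usec rate-shaping period noted in the comment:
 * r_param = 10000 Mbps / 8 = 1250 bytes/usec;
 * rs_threshold = (100 * 1250 * 5) / 4 = 156250 bytes;
 * t_fair = T_FAIR_COEF / 10000 = 1000 usec (per the comment above);
 * fairness_timeout = (QM_ARB_BYTES / 1250) / 4 SDM ticks.
 */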
2348
2691d51d
EG
 2349/* Calculates the sum of vn_min_rates and stores it in bp->vn_weight_sum.
 2350 It's needed for further normalizing of the min_rates.
 2351 The stored value is:
 2352 the sum of vn_min_rates,
 2353 or
 2354 0 - if all the min_rates are 0.
 2355 In the latter case the fairness algorithm should be deactivated.
 2356 If not all min_rates are zero then those that are zeroes will be set to 1.
 2357 */
2358static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2359{
2360 int all_zero = 1;
2361 int port = BP_PORT(bp);
2362 int vn;
2363
2364 bp->vn_weight_sum = 0;
2365 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2366 int func = 2*vn + port;
2367 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2368 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2369 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2370
2371 /* Skip hidden vns */
2372 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2373 continue;
2374
2375 /* If min rate is zero - set it to 1 */
2376 if (!vn_min_rate)
2377 vn_min_rate = DEF_MIN_RATE;
2378 else
2379 all_zero = 0;
2380
2381 bp->vn_weight_sum += vn_min_rate;
2382 }
2383
2384 /* ... only if all min rates are zeros - disable fairness */
b015e3d1
EG
2385 if (all_zero) {
2386 bp->cmng.flags.cmng_enables &=
2387 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
 2388		DP(NETIF_MSG_IFUP, "All MIN values are zeroes -"
2389 " fairness will be disabled\n");
2390 } else
2391 bp->cmng.flags.cmng_enables |=
2392 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2691d51d
EG
2393}
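
/*
 * Example, assuming two visible vns whose MIN_BW config fields are
 * 25 and 75: vn_min_rate becomes 2500 and 7500, vn_weight_sum = 10000
 * and fairness stays enabled.  If every field were 0, each vn would be
 * bumped to DEF_MIN_RATE but all_zero would remain set, so fairness
 * would be disabled.
 */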
2394
8a1c38d1 2395static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
34f80b04
EG
2396{
2397 struct rate_shaping_vars_per_vn m_rs_vn;
2398 struct fairness_vars_per_vn m_fair_vn;
2399 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2400 u16 vn_min_rate, vn_max_rate;
2401 int i;
2402
2403 /* If function is hidden - set min and max to zeroes */
2404 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2405 vn_min_rate = 0;
2406 vn_max_rate = 0;
2407
2408 } else {
2409 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2410 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
b015e3d1
EG
2411 /* If min rate is zero - set it to 1 */
2412 if (!vn_min_rate)
34f80b04
EG
2413 vn_min_rate = DEF_MIN_RATE;
2414 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2415 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2416 }
8a1c38d1 2417 DP(NETIF_MSG_IFUP,
b015e3d1 2418 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
8a1c38d1 2419 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
2420
2421 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2422 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2423
2424 /* global vn counter - maximal Mbps for this vn */
2425 m_rs_vn.vn_counter.rate = vn_max_rate;
2426
2427 /* quota - number of bytes transmitted in this period */
2428 m_rs_vn.vn_counter.quota =
2429 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2430
8a1c38d1 2431 if (bp->vn_weight_sum) {
34f80b04
EG
2432 /* credit for each period of the fairness algorithm:
 2433		   number of bytes in T_FAIR (the vns share the port rate).
8a1c38d1
EG
2434 vn_weight_sum should not be larger than 10000, thus
2435 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2436 than zero */
34f80b04 2437 m_fair_vn.vn_credit_delta =
cdaa7cb8
VZ
2438 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
2439 (8 * bp->vn_weight_sum))),
2440 (bp->cmng.fair_vars.fair_threshold * 2));
2441 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
34f80b04
EG
2442 m_fair_vn.vn_credit_delta);
2443 }
2444
34f80b04
EG
2445 /* Store it to internal memory */
2446 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2447 REG_WR(bp, BAR_XSTRORM_INTMEM +
2448 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2449 ((u32 *)(&m_rs_vn))[i]);
2450
2451 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2452 REG_WR(bp, BAR_XSTRORM_INTMEM +
2453 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2454 ((u32 *)(&m_fair_vn))[i]);
2455}
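
/*
 * Quota sanity check with hypothetical numbers: for vn_max_rate =
 * 10000 Mbps and the 100 usec rate-shaping period used above,
 * quota = 10000 * 100 / 8 = 125000 bytes per period
 * (Mbit/s is bits/usec, so bits/usec * usec / 8 = bytes).
 */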
2456
8a1c38d1 2457
c18487ee
YR
2458/* This function is called upon link interrupt */
2459static void bnx2x_link_attn(struct bnx2x *bp)
2460{
bb2a0f7a
YG
2461 /* Make sure that we are synced with the current statistics */
2462 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2463
c18487ee 2464 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2465
bb2a0f7a
YG
2466 if (bp->link_vars.link_up) {
2467
1c06328c 2468 /* dropless flow control */
a18f5128 2469 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
1c06328c
EG
2470 int port = BP_PORT(bp);
2471 u32 pause_enabled = 0;
2472
2473 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2474 pause_enabled = 1;
2475
2476 REG_WR(bp, BAR_USTRORM_INTMEM +
ca00392c 2477 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1c06328c
EG
2478 pause_enabled);
2479 }
2480
bb2a0f7a
YG
2481 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2482 struct host_port_stats *pstats;
2483
2484 pstats = bnx2x_sp(bp, port_stats);
2485 /* reset old bmac stats */
2486 memset(&(pstats->mac_stx[0]), 0,
2487 sizeof(struct mac_stx));
2488 }
f34d28ea 2489 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a
YG
2490 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2491 }
2492
c18487ee
YR
2493 /* indicate link status */
2494 bnx2x_link_report(bp);
34f80b04
EG
2495
2496 if (IS_E1HMF(bp)) {
8a1c38d1 2497 int port = BP_PORT(bp);
34f80b04 2498 int func;
8a1c38d1 2499 int vn;
34f80b04 2500
ab6ad5a4 2501 /* Set the attention towards other drivers on the same port */
34f80b04
EG
2502 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2503 if (vn == BP_E1HVN(bp))
2504 continue;
2505
8a1c38d1 2506 func = ((vn << 1) | port);
34f80b04
EG
2507 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2508 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2509 }
34f80b04 2510
8a1c38d1
EG
2511 if (bp->link_vars.link_up) {
2512 int i;
2513
2514 /* Init rate shaping and fairness contexts */
2515 bnx2x_init_port_minmax(bp);
34f80b04 2516
34f80b04 2517 for (vn = VN_0; vn < E1HVN_MAX; vn++)
8a1c38d1
EG
2518 bnx2x_init_vn_minmax(bp, 2*vn + port);
2519
2520 /* Store it to internal memory */
2521 for (i = 0;
2522 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2523 REG_WR(bp, BAR_XSTRORM_INTMEM +
2524 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2525 ((u32 *)(&bp->cmng))[i]);
2526 }
34f80b04 2527 }
c18487ee 2528}
a2fbb9ea 2529
c18487ee
YR
2530static void bnx2x__link_status_update(struct bnx2x *bp)
2531{
f34d28ea 2532 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
c18487ee 2533 return;
a2fbb9ea 2534
c18487ee 2535 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2536
bb2a0f7a
YG
2537 if (bp->link_vars.link_up)
2538 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2539 else
2540 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2541
2691d51d
EG
2542 bnx2x_calc_vn_weight_sum(bp);
2543
c18487ee
YR
2544 /* indicate link status */
2545 bnx2x_link_report(bp);
a2fbb9ea 2546}
a2fbb9ea 2547
34f80b04
EG
2548static void bnx2x_pmf_update(struct bnx2x *bp)
2549{
2550 int port = BP_PORT(bp);
2551 u32 val;
2552
2553 bp->port.pmf = 1;
2554 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2555
2556 /* enable nig attention */
2557 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2558 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2559 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2560
2561 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2562}
2563
c18487ee 2564/* end of Link */
a2fbb9ea
ET
2565
2566/* slow path */
2567
2568/*
2569 * General service functions
2570 */
2571
2691d51d
EG
2572/* send the MCP a request, block until there is a reply */
2573u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2574{
2575 int func = BP_FUNC(bp);
2576 u32 seq = ++bp->fw_seq;
2577 u32 rc = 0;
2578 u32 cnt = 1;
2579 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2580
c4ff7cbf 2581 mutex_lock(&bp->fw_mb_mutex);
2691d51d
EG
2582 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2583 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2584
2585 do {
 2586		/* let the FW do its magic ... */
2587 msleep(delay);
2588
2589 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2590
c4ff7cbf
EG
 2591		/* Give the FW up to 5 seconds (500*10ms) */
2592 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2691d51d
EG
2593
2594 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2595 cnt*delay, rc, seq);
2596
2597 /* is this a reply to our command? */
2598 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2599 rc &= FW_MSG_CODE_MASK;
2600 else {
2601 /* FW BUG! */
2602 BNX2X_ERR("FW failed to respond!\n");
2603 bnx2x_fw_dump(bp);
2604 rc = 0;
2605 }
c4ff7cbf 2606 mutex_unlock(&bp->fw_mb_mutex);
2691d51d
EG
2607
2608 return rc;
2609}
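
/*
 * Typical calls, as in the DCC handler below:
 *
 *	bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
 *	bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
 *
 * A return of 0 means the MCP never produced a matching sequence
 * number (the "FW failed to respond" path above).
 */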
2610
e665bfda 2611static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2691d51d
EG
2612static void bnx2x_set_rx_mode(struct net_device *dev);
2613
2614static void bnx2x_e1h_disable(struct bnx2x *bp)
2615{
2616 int port = BP_PORT(bp);
2691d51d
EG
2617
2618 netif_tx_disable(bp->dev);
2691d51d
EG
2619
2620 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2621
2691d51d
EG
2622 netif_carrier_off(bp->dev);
2623}
2624
2625static void bnx2x_e1h_enable(struct bnx2x *bp)
2626{
2627 int port = BP_PORT(bp);
2628
2629 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2630
2691d51d
EG
 2631	/* Tx queues should only be re-enabled */
2632 netif_tx_wake_all_queues(bp->dev);
2633
061bc702
EG
2634 /*
 2635	 * Should not call netif_carrier_on since it will be called when the
 2636	 * link state is checked and the link is up
2637 */
2691d51d
EG
2638}
2639
2640static void bnx2x_update_min_max(struct bnx2x *bp)
2641{
2642 int port = BP_PORT(bp);
2643 int vn, i;
2644
2645 /* Init rate shaping and fairness contexts */
2646 bnx2x_init_port_minmax(bp);
2647
2648 bnx2x_calc_vn_weight_sum(bp);
2649
2650 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2651 bnx2x_init_vn_minmax(bp, 2*vn + port);
2652
2653 if (bp->port.pmf) {
2654 int func;
2655
2656 /* Set the attention towards other drivers on the same port */
2657 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2658 if (vn == BP_E1HVN(bp))
2659 continue;
2660
2661 func = ((vn << 1) | port);
2662 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2663 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2664 }
2665
2666 /* Store it to internal memory */
2667 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2668 REG_WR(bp, BAR_XSTRORM_INTMEM +
2669 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2670 ((u32 *)(&bp->cmng))[i]);
2671 }
2672}
2673
2674static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2675{
2691d51d 2676 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2691d51d
EG
2677
2678 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2679
f34d28ea
EG
2680 /*
2681 * This is the only place besides the function initialization
 2682		 * where bp->flags can change, so it is done without any
2683 * locks
2684 */
2691d51d
EG
2685 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2686 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
f34d28ea 2687 bp->flags |= MF_FUNC_DIS;
2691d51d
EG
2688
2689 bnx2x_e1h_disable(bp);
2690 } else {
2691 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
f34d28ea 2692 bp->flags &= ~MF_FUNC_DIS;
2691d51d
EG
2693
2694 bnx2x_e1h_enable(bp);
2695 }
2696 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2697 }
2698 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2699
2700 bnx2x_update_min_max(bp);
2701 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2702 }
2703
2704 /* Report results to MCP */
2705 if (dcc_event)
2706 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2707 else
2708 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2709}
2710
28912902
MC
2711/* must be called under the spq lock */
2712static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2713{
2714 struct eth_spe *next_spe = bp->spq_prod_bd;
2715
2716 if (bp->spq_prod_bd == bp->spq_last_bd) {
2717 bp->spq_prod_bd = bp->spq;
2718 bp->spq_prod_idx = 0;
2719 DP(NETIF_MSG_TIMER, "end of spq\n");
2720 } else {
2721 bp->spq_prod_bd++;
2722 bp->spq_prod_idx++;
2723 }
2724 return next_spe;
2725}
2726
2727/* must be called under the spq lock */
2728static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2729{
2730 int func = BP_FUNC(bp);
2731
2732 /* Make sure that BD data is updated before writing the producer */
2733 wmb();
2734
2735 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2736 bp->spq_prod_idx);
2737 mmiowb();
2738}
2739
a2fbb9ea
ET
2740/* the slow path queue is odd since completions arrive on the fastpath ring */
2741static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2742 u32 data_hi, u32 data_lo, int common)
2743{
28912902 2744 struct eth_spe *spe;
a2fbb9ea 2745
a2fbb9ea
ET
2746#ifdef BNX2X_STOP_ON_ERROR
2747 if (unlikely(bp->panic))
2748 return -EIO;
2749#endif
2750
34f80b04 2751 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2752
2753 if (!bp->spq_left) {
2754 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2755 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2756 bnx2x_panic();
2757 return -EBUSY;
2758 }
f1410647 2759
28912902
MC
2760 spe = bnx2x_sp_get_next(bp);
2761
a2fbb9ea 2762 /* CID needs port number to be encoded int it */
28912902 2763 spe->hdr.conn_and_cmd_data =
cdaa7cb8
VZ
2764 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2765 HW_CID(bp, cid));
28912902 2766 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
a2fbb9ea 2767 if (common)
28912902 2768 spe->hdr.type |=
a2fbb9ea
ET
2769 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2770
28912902
MC
2771 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2772 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
a2fbb9ea
ET
2773
2774 bp->spq_left--;
2775
cdaa7cb8
VZ
2776 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2777 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2778 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2779 (u32)(U64_LO(bp->spq_mapping) +
2780 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2781 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2782
28912902 2783 bnx2x_sp_prod_update(bp);
34f80b04 2784 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2785 return 0;
2786}
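
/*
 * Illustrative post of a slow-path element (the last argument marks a
 * "common" ramrod); the command and mapping names here are only
 * examples, not lifted from this file:
 *
 *	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
 *			   U64_HI(config_mapping), U64_LO(config_mapping), 1);
 */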
2787
2788/* acquire split MCP access lock register */
4a37fb66 2789static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2790{
72fd0718 2791 u32 j, val;
34f80b04 2792 int rc = 0;
a2fbb9ea
ET
2793
2794 might_sleep();
72fd0718 2795 for (j = 0; j < 1000; j++) {
a2fbb9ea
ET
2796 val = (1UL << 31);
2797 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2798 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2799 if (val & (1L << 31))
2800 break;
2801
2802 msleep(5);
2803 }
a2fbb9ea 2804 if (!(val & (1L << 31))) {
19680c48 2805 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2806 rc = -EBUSY;
2807 }
2808
2809 return rc;
2810}
2811
4a37fb66
YG
2812/* release split MCP access lock register */
2813static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea 2814{
72fd0718 2815 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
a2fbb9ea
ET
2816}
2817
2818static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2819{
2820 struct host_def_status_block *def_sb = bp->def_status_blk;
2821 u16 rc = 0;
2822
2823 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2824 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2825 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2826 rc |= 1;
2827 }
2828 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2829 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2830 rc |= 2;
2831 }
2832 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2833 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2834 rc |= 4;
2835 }
2836 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2837 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2838 rc |= 8;
2839 }
2840 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2841 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2842 rc |= 16;
2843 }
2844 return rc;
2845}
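
/*
 * The returned bitmap records which default-SB indices moved:
 * 1 = attention bits, 2 = CSTORM, 4 = USTORM, 8 = XSTORM, 16 = TSTORM.
 */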
2846
2847/*
2848 * slow path service functions
2849 */
2850
2851static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2852{
34f80b04 2853 int port = BP_PORT(bp);
5c862848
EG
2854 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2855 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
2856 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2857 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2858 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2859 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2860 u32 aeu_mask;
87942b46 2861 u32 nig_mask = 0;
a2fbb9ea 2862
a2fbb9ea
ET
2863 if (bp->attn_state & asserted)
2864 BNX2X_ERR("IGU ERROR\n");
2865
3fcaf2e5
EG
2866 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2867 aeu_mask = REG_RD(bp, aeu_addr);
2868
a2fbb9ea 2869 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5 2870 aeu_mask, asserted);
72fd0718 2871 aeu_mask &= ~(asserted & 0x3ff);
3fcaf2e5 2872 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2873
3fcaf2e5
EG
2874 REG_WR(bp, aeu_addr, aeu_mask);
2875 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2876
3fcaf2e5 2877 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2878 bp->attn_state |= asserted;
3fcaf2e5 2879 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2880
2881 if (asserted & ATTN_HARD_WIRED_MASK) {
2882 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2883
a5e9a7cf
EG
2884 bnx2x_acquire_phy_lock(bp);
2885
877e9aa4 2886 /* save nig interrupt mask */
87942b46 2887 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2888 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2889
c18487ee 2890 bnx2x_link_attn(bp);
a2fbb9ea
ET
2891
2892 /* handle unicore attn? */
2893 }
2894 if (asserted & ATTN_SW_TIMER_4_FUNC)
2895 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2896
2897 if (asserted & GPIO_2_FUNC)
2898 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2899
2900 if (asserted & GPIO_3_FUNC)
2901 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2902
2903 if (asserted & GPIO_4_FUNC)
2904 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2905
2906 if (port == 0) {
2907 if (asserted & ATTN_GENERAL_ATTN_1) {
2908 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2909 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2910 }
2911 if (asserted & ATTN_GENERAL_ATTN_2) {
2912 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2913 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2914 }
2915 if (asserted & ATTN_GENERAL_ATTN_3) {
2916 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2917 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2918 }
2919 } else {
2920 if (asserted & ATTN_GENERAL_ATTN_4) {
2921 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2922 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2923 }
2924 if (asserted & ATTN_GENERAL_ATTN_5) {
2925 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2926 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2927 }
2928 if (asserted & ATTN_GENERAL_ATTN_6) {
2929 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2930 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2931 }
2932 }
2933
2934 } /* if hardwired */
2935
5c862848
EG
2936 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2937 asserted, hc_addr);
2938 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
2939
2940 /* now set back the mask */
a5e9a7cf 2941 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2942 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2943 bnx2x_release_phy_lock(bp);
2944 }
a2fbb9ea
ET
2945}
2946
fd4ef40d
EG
2947static inline void bnx2x_fan_failure(struct bnx2x *bp)
2948{
2949 int port = BP_PORT(bp);
2950
2951 /* mark the failure */
2952 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2953 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2954 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2955 bp->link_params.ext_phy_config);
2956
2957 /* log the failure */
cdaa7cb8
VZ
2958 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2959 " the driver to shutdown the card to prevent permanent"
2960 " damage. Please contact OEM Support for assistance\n");
fd4ef40d 2961}
ab6ad5a4 2962
877e9aa4 2963static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2964{
34f80b04 2965 int port = BP_PORT(bp);
877e9aa4 2966 int reg_offset;
4d295db0 2967 u32 val, swap_val, swap_override;
877e9aa4 2968
34f80b04
EG
2969 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2970 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2971
34f80b04 2972 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2973
2974 val = REG_RD(bp, reg_offset);
2975 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2976 REG_WR(bp, reg_offset, val);
2977
2978 BNX2X_ERR("SPIO5 hw attention\n");
2979
fd4ef40d 2980 /* Fan failure attention */
35b19ba5
EG
2981 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2982 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
17de50b7 2983 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2984 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2985 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
fd4ef40d
EG
2986 /* The PHY reset is controlled by GPIO 1 */
2987 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2988 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4
ET
2989 break;
2990
4d295db0
EG
2991 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2992 /* The PHY reset is controlled by GPIO 1 */
2993 /* fake the port number to cancel the swap done in
2994 set_gpio() */
2995 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2996 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2997 port = (swap_val && swap_override) ^ 1;
2998 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2999 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
3000 break;
3001
877e9aa4
ET
3002 default:
3003 break;
3004 }
fd4ef40d 3005 bnx2x_fan_failure(bp);
877e9aa4 3006 }
34f80b04 3007
589abe3a
EG
3008 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
3009 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
3010 bnx2x_acquire_phy_lock(bp);
3011 bnx2x_handle_module_detect_int(&bp->link_params);
3012 bnx2x_release_phy_lock(bp);
3013 }
3014
34f80b04
EG
3015 if (attn & HW_INTERRUT_ASSERT_SET_0) {
3016
3017 val = REG_RD(bp, reg_offset);
3018 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
3019 REG_WR(bp, reg_offset, val);
3020
3021 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
0fc5d009 3022 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
34f80b04
EG
3023 bnx2x_panic();
3024 }
877e9aa4
ET
3025}
3026
3027static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3028{
3029 u32 val;
3030
0626b899 3031 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
3032
3033 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3034 BNX2X_ERR("DB hw attention 0x%x\n", val);
3035 /* DORQ discard attention */
3036 if (val & 0x2)
3037 BNX2X_ERR("FATAL error from DORQ\n");
3038 }
34f80b04
EG
3039
3040 if (attn & HW_INTERRUT_ASSERT_SET_1) {
3041
3042 int port = BP_PORT(bp);
3043 int reg_offset;
3044
3045 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3046 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3047
3048 val = REG_RD(bp, reg_offset);
3049 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3050 REG_WR(bp, reg_offset, val);
3051
3052 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
0fc5d009 3053 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
34f80b04
EG
3054 bnx2x_panic();
3055 }
877e9aa4
ET
3056}
3057
3058static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3059{
3060 u32 val;
3061
3062 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3063
3064 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3065 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3066 /* CFC error attention */
3067 if (val & 0x2)
3068 BNX2X_ERR("FATAL error from CFC\n");
3069 }
3070
3071 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3072
3073 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3074 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3075 /* RQ_USDMDP_FIFO_OVERFLOW */
3076 if (val & 0x18000)
3077 BNX2X_ERR("FATAL error from PXP\n");
3078 }
34f80b04
EG
3079
3080 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3081
3082 int port = BP_PORT(bp);
3083 int reg_offset;
3084
3085 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3086 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3087
3088 val = REG_RD(bp, reg_offset);
3089 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3090 REG_WR(bp, reg_offset, val);
3091
3092 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
0fc5d009 3093 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
34f80b04
EG
3094 bnx2x_panic();
3095 }
877e9aa4
ET
3096}
3097
3098static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3099{
34f80b04
EG
3100 u32 val;
3101
877e9aa4
ET
3102 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3103
34f80b04
EG
3104 if (attn & BNX2X_PMF_LINK_ASSERT) {
3105 int func = BP_FUNC(bp);
3106
3107 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
b015e3d1
EG
3108 bp->mf_config = SHMEM_RD(bp,
3109 mf_cfg.func_mf_config[func].config);
2691d51d
EG
3110 val = SHMEM_RD(bp, func_mb[func].drv_status);
3111 if (val & DRV_STATUS_DCC_EVENT_MASK)
3112 bnx2x_dcc_event(bp,
3113 (val & DRV_STATUS_DCC_EVENT_MASK));
34f80b04 3114 bnx2x__link_status_update(bp);
2691d51d 3115 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
34f80b04
EG
3116 bnx2x_pmf_update(bp);
3117
3118 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
3119
3120 BNX2X_ERR("MC assert!\n");
3121 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3122 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3123 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3124 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3125 bnx2x_panic();
3126
3127 } else if (attn & BNX2X_MCP_ASSERT) {
3128
3129 BNX2X_ERR("MCP assert!\n");
3130 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 3131 bnx2x_fw_dump(bp);
877e9aa4
ET
3132
3133 } else
3134 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3135 }
3136
3137 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
3138 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3139 if (attn & BNX2X_GRC_TIMEOUT) {
3140 val = CHIP_IS_E1H(bp) ?
3141 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3142 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3143 }
3144 if (attn & BNX2X_GRC_RSV) {
3145 val = CHIP_IS_E1H(bp) ?
3146 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3147 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3148 }
877e9aa4 3149 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
3150 }
3151}
3152
72fd0718
VZ
3153static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
3154static int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
3155
3156
3157#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3158#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3159#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3160#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3161#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
3162#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
3163/*
3164 * should be run under rtnl lock
3165 */
3166static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3167{
3168 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3169 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3170 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3171 barrier();
3172 mmiowb();
3173}
3174
3175/*
3176 * should be run under rtnl lock
3177 */
3178static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3179{
3180 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3181 val |= (1 << 16);
3182 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3183 barrier();
3184 mmiowb();
3185}
3186
3187/*
3188 * should be run under rtnl lock
3189 */
3190static inline bool bnx2x_reset_is_done(struct bnx2x *bp)
3191{
3192 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3193 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3194 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3195}
3196
3197/*
3198 * should be run under rtnl lock
3199 */
3200static inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3201{
3202 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3203
3204 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3205
3206 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3207 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3208 barrier();
3209 mmiowb();
3210}
3211
3212/*
3213 * should be run under rtnl lock
3214 */
3215static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3216{
3217 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3218
3219 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3220
3221 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3222 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3223 barrier();
3224 mmiowb();
3225
3226 return val1;
3227}
3228
3229/*
3230 * should be run under rtnl lock
3231 */
3232static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3233{
3234 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3235}
3236
3237static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3238{
3239 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3240 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3241}
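
/*
 * Layout of BNX2X_MISC_GEN_REG as used by the helpers above: bits 15:0
 * hold the load counter, bit 16 the reset-in-progress flag.  E.g. a
 * (hypothetical) value of 0x00010002 means two functions are loaded
 * while a recovery reset is still running; bnx2x_reset_is_done()
 * reports true only once bits 31:16 are all clear.
 */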
3242
3243static inline void _print_next_block(int idx, const char *blk)
3244{
3245 if (idx)
3246 pr_cont(", ");
3247 pr_cont("%s", blk);
3248}
3249
3250static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3251{
3252 int i = 0;
3253 u32 cur_bit = 0;
3254 for (i = 0; sig; i++) {
3255 cur_bit = ((u32)0x1 << i);
3256 if (sig & cur_bit) {
3257 switch (cur_bit) {
3258 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3259 _print_next_block(par_num++, "BRB");
3260 break;
3261 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3262 _print_next_block(par_num++, "PARSER");
3263 break;
3264 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3265 _print_next_block(par_num++, "TSDM");
3266 break;
3267 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3268 _print_next_block(par_num++, "SEARCHER");
3269 break;
3270 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3271 _print_next_block(par_num++, "TSEMI");
3272 break;
3273 }
3274
3275 /* Clear the bit */
3276 sig &= ~cur_bit;
3277 }
3278 }
3279
3280 return par_num;
3281}
3282
3283static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3284{
3285 int i = 0;
3286 u32 cur_bit = 0;
3287 for (i = 0; sig; i++) {
3288 cur_bit = ((u32)0x1 << i);
3289 if (sig & cur_bit) {
3290 switch (cur_bit) {
3291 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3292 _print_next_block(par_num++, "PBCLIENT");
3293 break;
3294 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3295 _print_next_block(par_num++, "QM");
3296 break;
3297 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3298 _print_next_block(par_num++, "XSDM");
3299 break;
3300 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3301 _print_next_block(par_num++, "XSEMI");
3302 break;
3303 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3304 _print_next_block(par_num++, "DOORBELLQ");
3305 break;
3306 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3307 _print_next_block(par_num++, "VAUX PCI CORE");
3308 break;
3309 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3310 _print_next_block(par_num++, "DEBUG");
3311 break;
3312 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3313 _print_next_block(par_num++, "USDM");
3314 break;
3315 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3316 _print_next_block(par_num++, "USEMI");
3317 break;
3318 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3319 _print_next_block(par_num++, "UPB");
3320 break;
3321 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3322 _print_next_block(par_num++, "CSDM");
3323 break;
3324 }
3325
3326 /* Clear the bit */
3327 sig &= ~cur_bit;
3328 }
3329 }
3330
3331 return par_num;
3332}
3333
3334static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3335{
3336 int i = 0;
3337 u32 cur_bit = 0;
3338 for (i = 0; sig; i++) {
3339 cur_bit = ((u32)0x1 << i);
3340 if (sig & cur_bit) {
3341 switch (cur_bit) {
3342 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3343 _print_next_block(par_num++, "CSEMI");
3344 break;
3345 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3346 _print_next_block(par_num++, "PXP");
3347 break;
3348 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3349 _print_next_block(par_num++,
3350 "PXPPCICLOCKCLIENT");
3351 break;
3352 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3353 _print_next_block(par_num++, "CFC");
3354 break;
3355 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3356 _print_next_block(par_num++, "CDU");
3357 break;
3358 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3359 _print_next_block(par_num++, "IGU");
3360 break;
3361 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3362 _print_next_block(par_num++, "MISC");
3363 break;
3364 }
3365
3366 /* Clear the bit */
3367 sig &= ~cur_bit;
3368 }
3369 }
3370
3371 return par_num;
3372}
3373
3374static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3375{
3376 int i = 0;
3377 u32 cur_bit = 0;
3378 for (i = 0; sig; i++) {
3379 cur_bit = ((u32)0x1 << i);
3380 if (sig & cur_bit) {
3381 switch (cur_bit) {
3382 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3383 _print_next_block(par_num++, "MCP ROM");
3384 break;
3385 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3386 _print_next_block(par_num++, "MCP UMP RX");
3387 break;
3388 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3389 _print_next_block(par_num++, "MCP UMP TX");
3390 break;
3391 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3392 _print_next_block(par_num++, "MCP SCPAD");
3393 break;
3394 }
3395
3396 /* Clear the bit */
3397 sig &= ~cur_bit;
3398 }
3399 }
3400
3401 return par_num;
3402}
3403
3404static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3405 u32 sig2, u32 sig3)
3406{
3407 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3408 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3409 int par_num = 0;
 3410		DP(NETIF_MSG_HW, "Parity error: HW block parity attention: "
3411 "[0]:0x%08x [1]:0x%08x "
3412 "[2]:0x%08x [3]:0x%08x\n",
3413 sig0 & HW_PRTY_ASSERT_SET_0,
3414 sig1 & HW_PRTY_ASSERT_SET_1,
3415 sig2 & HW_PRTY_ASSERT_SET_2,
3416 sig3 & HW_PRTY_ASSERT_SET_3);
3417 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3418 bp->dev->name);
3419 par_num = bnx2x_print_blocks_with_parity0(
3420 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3421 par_num = bnx2x_print_blocks_with_parity1(
3422 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3423 par_num = bnx2x_print_blocks_with_parity2(
3424 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3425 par_num = bnx2x_print_blocks_with_parity3(
3426 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3427 printk("\n");
3428 return true;
3429 } else
3430 return false;
3431}
3432
3433static bool bnx2x_chk_parity_attn(struct bnx2x *bp)
877e9aa4 3434{
a2fbb9ea 3435 struct attn_route attn;
72fd0718
VZ
3436 int port = BP_PORT(bp);
3437
3438 attn.sig[0] = REG_RD(bp,
3439 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3440 port*4);
3441 attn.sig[1] = REG_RD(bp,
3442 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3443 port*4);
3444 attn.sig[2] = REG_RD(bp,
3445 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3446 port*4);
3447 attn.sig[3] = REG_RD(bp,
3448 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3449 port*4);
3450
3451 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3452 attn.sig[3]);
3453}
3454
3455static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3456{
3457 struct attn_route attn, *group_mask;
34f80b04 3458 int port = BP_PORT(bp);
877e9aa4 3459 int index;
a2fbb9ea
ET
3460 u32 reg_addr;
3461 u32 val;
3fcaf2e5 3462 u32 aeu_mask;
a2fbb9ea
ET
3463
3464 /* need to take the HW lock because the MCP or the other port
3465 might also try to handle this event */
4a37fb66 3466 bnx2x_acquire_alr(bp);
a2fbb9ea 3467
72fd0718
VZ
3468 if (bnx2x_chk_parity_attn(bp)) {
3469 bp->recovery_state = BNX2X_RECOVERY_INIT;
3470 bnx2x_set_reset_in_progress(bp);
3471 schedule_delayed_work(&bp->reset_task, 0);
3472 /* Disable HW interrupts */
3473 bnx2x_int_disable(bp);
3474 bnx2x_release_alr(bp);
3475 /* In case of parity errors don't handle attentions so that
3476 * the other function can also "see" the parity errors.
3477 */
3478 return;
3479 }
3480
a2fbb9ea
ET
3481 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3482 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3483 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3484 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
3485 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3486 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
3487
3488 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3489 if (deasserted & (1 << index)) {
72fd0718 3490 group_mask = &bp->attn_group[index];
a2fbb9ea 3491
34f80b04 3492 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
72fd0718
VZ
3493 index, group_mask->sig[0], group_mask->sig[1],
3494 group_mask->sig[2], group_mask->sig[3]);
a2fbb9ea 3495
877e9aa4 3496 bnx2x_attn_int_deasserted3(bp,
72fd0718 3497 attn.sig[3] & group_mask->sig[3]);
877e9aa4 3498 bnx2x_attn_int_deasserted1(bp,
72fd0718 3499 attn.sig[1] & group_mask->sig[1]);
877e9aa4 3500 bnx2x_attn_int_deasserted2(bp,
72fd0718 3501 attn.sig[2] & group_mask->sig[2]);
877e9aa4 3502 bnx2x_attn_int_deasserted0(bp,
72fd0718 3503 attn.sig[0] & group_mask->sig[0]);
a2fbb9ea
ET
3504 }
3505 }
3506
4a37fb66 3507 bnx2x_release_alr(bp);
a2fbb9ea 3508
5c862848 3509 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
a2fbb9ea
ET
3510
3511 val = ~deasserted;
3fcaf2e5
EG
3512 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3513 val, reg_addr);
5c862848 3514 REG_WR(bp, reg_addr, val);
a2fbb9ea 3515
a2fbb9ea 3516 if (~bp->attn_state & deasserted)
3fcaf2e5 3517 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
3518
3519 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3520 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3521
3fcaf2e5
EG
3522 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3523 aeu_mask = REG_RD(bp, reg_addr);
3524
3525 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3526 aeu_mask, deasserted);
72fd0718 3527 aeu_mask |= (deasserted & 0x3ff);
3fcaf2e5 3528 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 3529
3fcaf2e5
EG
3530 REG_WR(bp, reg_addr, aeu_mask);
3531 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
3532
3533 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3534 bp->attn_state &= ~deasserted;
3535 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3536}
3537
3538static void bnx2x_attn_int(struct bnx2x *bp)
3539{
3540 /* read local copy of bits */
68d59484
EG
3541 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3542 attn_bits);
3543 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3544 attn_bits_ack);
a2fbb9ea
ET
3545 u32 attn_state = bp->attn_state;
3546
3547 /* look for changed bits */
3548 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3549 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3550
3551 DP(NETIF_MSG_HW,
3552 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3553 attn_bits, attn_ack, asserted, deasserted);
3554
3555 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 3556 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
3557
3558 /* handle bits that were raised */
3559 if (asserted)
3560 bnx2x_attn_int_asserted(bp, asserted);
3561
3562 if (deasserted)
3563 bnx2x_attn_int_deasserted(bp, deasserted);
3564}
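
/* Worked example of the asserted/deasserted math above, with
 * illustrative values.  Take attn_bits = 1010b (lines raised now),
 * attn_ack = 0110b (lines already acknowledged) and
 * attn_state = 0110b (lines the driver believes are up):
 *
 *	asserted   = attn_bits & ~attn_ack & ~attn_state = 1000b
 *	deasserted = ~attn_bits & attn_ack & attn_state  = 0100b
 *
 * i.e. bit 3 was newly raised and bit 2 went away.  The "BAD
 * attention state" check fires on any line where attn_bits agrees
 * with attn_ack yet disagrees with attn_state - a line that is
 * neither cleanly asserted nor cleanly deasserted.
 */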
3565
3566static void bnx2x_sp_task(struct work_struct *work)
3567{
1cf167f2 3568 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
3569 u16 status;
3570
34f80b04 3571
a2fbb9ea
ET
3572 /* Return here if interrupt is disabled */
3573 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3574 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3575 return;
3576 }
3577
3578 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
3579/* if (status == 0) */
3580/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 3581
cdaa7cb8 3582 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
a2fbb9ea 3583
877e9aa4 3584 /* HW attentions */
cdaa7cb8 3585 if (status & 0x1) {
a2fbb9ea 3586 bnx2x_attn_int(bp);
cdaa7cb8
VZ
3587 status &= ~0x1;
3588 }
3589
3590 /* CStorm events: STAT_QUERY */
3591 if (status & 0x2) {
3592 DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
3593 status &= ~0x2;
3594 }
3595
3596 if (unlikely(status))
3597 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3598 status);
a2fbb9ea 3599
68d59484 3600 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
a2fbb9ea
ET
3601 IGU_INT_NOP, 1);
3602 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3603 IGU_INT_NOP, 1);
3604 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3605 IGU_INT_NOP, 1);
3606 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3607 IGU_INT_NOP, 1);
3608 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3609 IGU_INT_ENABLE, 1);
3610}
3611
3612static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3613{
3614 struct net_device *dev = dev_instance;
3615 struct bnx2x *bp = netdev_priv(dev);
3616
3617 /* Return here if interrupt is disabled */
3618 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3619 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3620 return IRQ_HANDLED;
3621 }
3622
8d9c5f34 3623 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
3624
3625#ifdef BNX2X_STOP_ON_ERROR
3626 if (unlikely(bp->panic))
3627 return IRQ_HANDLED;
3628#endif
3629
993ac7b5
MC
3630#ifdef BCM_CNIC
3631 {
3632 struct cnic_ops *c_ops;
3633
3634 rcu_read_lock();
3635 c_ops = rcu_dereference(bp->cnic_ops);
3636 if (c_ops)
3637 c_ops->cnic_handler(bp->cnic_data, NULL);
3638 rcu_read_unlock();
3639 }
3640#endif
1cf167f2 3641 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
3642
3643 return IRQ_HANDLED;
3644}
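
/* A minimal sketch of the RCU pattern used in the BCM_CNIC block
 * above; all names here are illustrative, not the driver's.  The
 * registration side publishes the ops struct with
 * rcu_assign_pointer() (and waits with synchronize_rcu() before
 * freeing it), so a reader holding rcu_read_lock() can never see the
 * struct disappear mid-call.
 */
struct my_ops {
	void (*handler)(void *data);
};

static struct my_ops *my_ops_ptr;	/* set by registration code */

static void my_call_handler(void *data)
{
	struct my_ops *ops;

	rcu_read_lock();
	ops = rcu_dereference(my_ops_ptr);
	if (ops)
		ops->handler(data);
	rcu_read_unlock();
}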
3645
3646/* end of slow path */
3647
3648/* Statistics */
3649
3650/****************************************************************************
3651* Macros
3652****************************************************************************/
3653
a2fbb9ea
ET
3654/* sum[hi:lo] += add[hi:lo] */
3655#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3656 do { \
3657 s_lo += a_lo; \
f5ba6772 3658 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
a2fbb9ea
ET
3659 } while (0)
3660
3661/* difference = minuend - subtrahend */
3662#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3663 do { \
bb2a0f7a
YG
3664 if (m_lo < s_lo) { \
3665 /* underflow */ \
a2fbb9ea 3666 d_hi = m_hi - s_hi; \
bb2a0f7a 3667 if (d_hi > 0) { \
6378c025 3668 /* we can borrow 1 */ \
a2fbb9ea
ET
3669 d_hi--; \
3670 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 3671 } else { \
6378c025 3672 /* m_hi <= s_hi */ \
a2fbb9ea
ET
3673 d_hi = 0; \
3674 d_lo = 0; \
3675 } \
bb2a0f7a
YG
3676 } else { \
3677 /* m_lo >= s_lo */ \
a2fbb9ea 3678 if (m_hi < s_hi) { \
bb2a0f7a
YG
3679 d_hi = 0; \
3680 d_lo = 0; \
3681 } else { \
6378c025 3682 /* m_hi >= s_hi */ \
bb2a0f7a
YG
3683 d_hi = m_hi - s_hi; \
3684 d_lo = m_lo - s_lo; \
a2fbb9ea
ET
3685 } \
3686 } \
3687 } while (0)
3688
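/* Quick worked check of the two macros above, with illustrative
 * values.  ADD_64 on s = 0x0:0xffffffff plus a = 0x0:0x1: s_lo wraps
 * to 0, (s_lo < a_lo) detects the wrap and carries 1 into s_hi,
 * giving 0x1:0x0.  DIFF_64 on m = 0x1:0x0 minus s = 0x0:0x1: the low
 * word underflows, so one is borrowed from d_hi and
 * d_lo = 0 + (UINT_MAX - 1) + 1 = 0xffffffff, giving 0x0:0xffffffff.
 */
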
bb2a0f7a 3689#define UPDATE_STAT64(s, t) \
a2fbb9ea 3690 do { \
bb2a0f7a
YG
3691 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3692 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3693 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3694 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3695 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3696 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea
ET
3697 } while (0)
3698
bb2a0f7a 3699#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 3700 do { \
bb2a0f7a
YG
3701 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3702 diff.lo, new->s##_lo, old->s##_lo); \
3703 ADD_64(estats->t##_hi, diff.hi, \
3704 estats->t##_lo, diff.lo); \
a2fbb9ea
ET
3705 } while (0)
3706
3707/* sum[hi:lo] += add */
3708#define ADD_EXTEND_64(s_hi, s_lo, a) \
3709 do { \
3710 s_lo += a; \
3711 s_hi += (s_lo < a) ? 1 : 0; \
3712 } while (0)
3713
bb2a0f7a 3714#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 3715 do { \
bb2a0f7a
YG
3716 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3717 pstats->mac_stx[1].s##_lo, \
3718 new->s); \
a2fbb9ea
ET
3719 } while (0)
3720
bb2a0f7a 3721#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea 3722 do { \
4781bfad
EG
3723 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3724 old_tclient->s = tclient->s; \
de832a55
EG
3725 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3726 } while (0)
3727
3728#define UPDATE_EXTEND_USTAT(s, t) \
3729 do { \
3730 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3731 old_uclient->s = uclient->s; \
3732 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
bb2a0f7a
YG
3733 } while (0)
3734
3735#define UPDATE_EXTEND_XSTAT(s, t) \
3736 do { \
4781bfad
EG
3737 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3738 old_xclient->s = xclient->s; \
de832a55
EG
3739 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3740 } while (0)
3741
3742/* minuend -= subtrahend */
3743#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3744 do { \
3745 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3746 } while (0)
3747
3748/* minuend[hi:lo] -= subtrahend */
3749#define SUB_EXTEND_64(m_hi, m_lo, s) \
3750 do { \
3751 SUB_64(m_hi, 0, m_lo, s); \
3752 } while (0)
3753
3754#define SUB_EXTEND_USTAT(s, t) \
3755 do { \
3756 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3757 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
a2fbb9ea
ET
3758 } while (0)
3759
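/* The UPDATE_EXTEND_* and SUB_EXTEND_* macros above lean on unsigned
 * wrap-around: subtracting the previous 32-bit snapshot from the
 * current value gives the right delta even if the hardware counter
 * wrapped, provided it wrapped at most once between polls.  A minimal
 * sketch of the same accumulation (hypothetical helper, not part of
 * the driver):
 */
static void my_extend_counter(u32 *hi, u32 *lo, u32 cur, u32 *old)
{
	u32 diff = cur - *old;		/* well defined modulo 2^32 */

	*old = cur;
	*lo += diff;
	*hi += (*lo < diff) ? 1 : 0;	/* carry into the high word */
}
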
3760/*
3761 * General service functions
3762 */
3763
3764static inline long bnx2x_hilo(u32 *hiref)
3765{
3766 u32 lo = *(hiref + 1);
3767#if (BITS_PER_LONG == 64)
3768 u32 hi = *hiref;
3769
3770 return HILO_U64(hi, lo);
3771#else
3772 return lo;
3773#endif
3774}
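
/* bnx2x_hilo() above returns the full 64-bit counter only when 'long'
 * is 64 bits wide; on a 32-bit kernel the high word is dropped, since
 * struct net_device_stats fields are longs.  For example, with
 * hi = 0x00000002 and lo = 0x00000001 a 64-bit build yields
 * 0x0000000200000001 while a 32-bit build yields just 0x00000001.
 */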
3775
3776/*
3777 * Init service functions
3778 */
3779
bb2a0f7a
YG
3780static void bnx2x_storm_stats_post(struct bnx2x *bp)
3781{
3782 if (!bp->stats_pending) {
3783 struct eth_query_ramrod_data ramrod_data = {0};
de832a55 3784 int i, rc;
bb2a0f7a
YG
3785
3786 ramrod_data.drv_counter = bp->stats_counter++;
8d9c5f34 3787 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
de832a55
EG
3788 for_each_queue(bp, i)
3789 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
bb2a0f7a
YG
3790
3791 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3792 ((u32 *)&ramrod_data)[1],
3793 ((u32 *)&ramrod_data)[0], 0);
3794 if (rc == 0) {
3795 /* stats ramrod has its own slot on the spq */
3796 bp->spq_left++;
3797 bp->stats_pending = 1;
3798 }
3799 }
3800}
3801
bb2a0f7a
YG
3802static void bnx2x_hw_stats_post(struct bnx2x *bp)
3803{
3804 struct dmae_command *dmae = &bp->stats_dmae;
3805 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3806
3807 *stats_comp = DMAE_COMP_VAL;
de832a55
EG
3808 if (CHIP_REV_IS_SLOW(bp))
3809 return;
bb2a0f7a
YG
3810
3811 /* loader */
3812 if (bp->executer_idx) {
3813 int loader_idx = PMF_DMAE_C(bp);
3814
3815 memset(dmae, 0, sizeof(struct dmae_command));
3816
3817 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3818 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3819 DMAE_CMD_DST_RESET |
3820#ifdef __BIG_ENDIAN
3821 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3822#else
3823 DMAE_CMD_ENDIANITY_DW_SWAP |
3824#endif
3825 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3826 DMAE_CMD_PORT_0) |
3827 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3828 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3829 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3830 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3831 sizeof(struct dmae_command) *
3832 (loader_idx + 1)) >> 2;
3833 dmae->dst_addr_hi = 0;
3834 dmae->len = sizeof(struct dmae_command) >> 2;
3835 if (CHIP_IS_E1(bp))
3836 dmae->len--;
3837 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3838 dmae->comp_addr_hi = 0;
3839 dmae->comp_val = 1;
3840
3841 *stats_comp = 0;
3842 bnx2x_post_dmae(bp, dmae, loader_idx);
3843
3844 } else if (bp->func_stx) {
3845 *stats_comp = 0;
3846 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3847 }
3848}
3849
3850static int bnx2x_stats_comp(struct bnx2x *bp)
3851{
3852 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3853 int cnt = 10;
3854
3855 might_sleep();
3856 while (*stats_comp != DMAE_COMP_VAL) {
bb2a0f7a
YG
3857 if (!cnt) {
3858 BNX2X_ERR("timeout waiting for stats finished\n");
3859 break;
3860 }
3861 cnt--;
12469401 3862 msleep(1);
bb2a0f7a
YG
3863 }
3864 return 1;
3865}
3866
3867/*
3868 * Statistics service functions
3869 */
3870
3871static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3872{
3873 struct dmae_command *dmae;
3874 u32 opcode;
3875 int loader_idx = PMF_DMAE_C(bp);
3876 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3877
3878 /* sanity */
3879 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3880 BNX2X_ERR("BUG!\n");
3881 return;
3882 }
3883
3884 bp->executer_idx = 0;
3885
3886 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3887 DMAE_CMD_C_ENABLE |
3888 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3889#ifdef __BIG_ENDIAN
3890 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3891#else
3892 DMAE_CMD_ENDIANITY_DW_SWAP |
3893#endif
3894 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3895 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3896
3897 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3898 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3899 dmae->src_addr_lo = bp->port.port_stx >> 2;
3900 dmae->src_addr_hi = 0;
3901 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3902 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3903 dmae->len = DMAE_LEN32_RD_MAX;
3904 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3905 dmae->comp_addr_hi = 0;
3906 dmae->comp_val = 1;
3907
3908 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3909 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3910 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3911 dmae->src_addr_hi = 0;
7a9b2557
VZ
3912 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3913 DMAE_LEN32_RD_MAX * 4);
3914 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3915 DMAE_LEN32_RD_MAX * 4);
bb2a0f7a
YG
3916 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3917 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3918 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3919 dmae->comp_val = DMAE_COMP_VAL;
3920
3921 *stats_comp = 0;
3922 bnx2x_hw_stats_post(bp);
3923 bnx2x_stats_comp(bp);
3924}
3925
3926static void bnx2x_port_stats_init(struct bnx2x *bp)
a2fbb9ea
ET
3927{
3928 struct dmae_command *dmae;
34f80b04 3929 int port = BP_PORT(bp);
bb2a0f7a 3930 int vn = BP_E1HVN(bp);
a2fbb9ea 3931 u32 opcode;
bb2a0f7a 3932 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3933 u32 mac_addr;
bb2a0f7a
YG
3934 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3935
3936 /* sanity */
3937 if (!bp->link_vars.link_up || !bp->port.pmf) {
3938 BNX2X_ERR("BUG!\n");
3939 return;
3940 }
a2fbb9ea
ET
3941
3942 bp->executer_idx = 0;
bb2a0f7a
YG
3943
3944 /* MCP */
3945 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3946 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3947 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3948#ifdef __BIG_ENDIAN
bb2a0f7a 3949 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3950#else
bb2a0f7a 3951 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3952#endif
bb2a0f7a
YG
3953 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3954 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3955
bb2a0f7a 3956 if (bp->port.port_stx) {
a2fbb9ea
ET
3957
3958 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3959 dmae->opcode = opcode;
bb2a0f7a
YG
3960 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3961 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3962 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3963 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3964 dmae->len = sizeof(struct host_port_stats) >> 2;
3965 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3966 dmae->comp_addr_hi = 0;
3967 dmae->comp_val = 1;
a2fbb9ea
ET
3968 }
3969
bb2a0f7a
YG
3970 if (bp->func_stx) {
3971
3972 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3973 dmae->opcode = opcode;
3974 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3975 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3976 dmae->dst_addr_lo = bp->func_stx >> 2;
3977 dmae->dst_addr_hi = 0;
3978 dmae->len = sizeof(struct host_func_stats) >> 2;
3979 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3980 dmae->comp_addr_hi = 0;
3981 dmae->comp_val = 1;
a2fbb9ea
ET
3982 }
3983
bb2a0f7a 3984 /* MAC */
a2fbb9ea
ET
3985 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3986 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3987 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3988#ifdef __BIG_ENDIAN
3989 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3990#else
3991 DMAE_CMD_ENDIANITY_DW_SWAP |
3992#endif
bb2a0f7a
YG
3993 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3994 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3995
c18487ee 3996 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
a2fbb9ea
ET
3997
3998 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3999 NIG_REG_INGRESS_BMAC0_MEM);
4000
4001 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
4002 BIGMAC_REGISTER_TX_STAT_GTBYT */
4003 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4004 dmae->opcode = opcode;
4005 dmae->src_addr_lo = (mac_addr +
4006 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4007 dmae->src_addr_hi = 0;
4008 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4009 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4010 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
4011 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4012 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4013 dmae->comp_addr_hi = 0;
4014 dmae->comp_val = 1;
4015
4016 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
4017 BIGMAC_REGISTER_RX_STAT_GRIPJ */
4018 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4019 dmae->opcode = opcode;
4020 dmae->src_addr_lo = (mac_addr +
4021 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4022 dmae->src_addr_hi = 0;
4023 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 4024 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 4025 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 4026 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea
ET
4027 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
4028 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4029 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4030 dmae->comp_addr_hi = 0;
4031 dmae->comp_val = 1;
4032
c18487ee 4033 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
a2fbb9ea
ET
4034
4035 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
4036
4037 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
4038 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4039 dmae->opcode = opcode;
4040 dmae->src_addr_lo = (mac_addr +
4041 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
4042 dmae->src_addr_hi = 0;
4043 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4044 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4045 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
4046 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4047 dmae->comp_addr_hi = 0;
4048 dmae->comp_val = 1;
4049
4050 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
4051 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4052 dmae->opcode = opcode;
4053 dmae->src_addr_lo = (mac_addr +
4054 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
4055 dmae->src_addr_hi = 0;
4056 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 4057 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 4058 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 4059 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea
ET
4060 dmae->len = 1;
4061 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4062 dmae->comp_addr_hi = 0;
4063 dmae->comp_val = 1;
4064
4065 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
4066 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4067 dmae->opcode = opcode;
4068 dmae->src_addr_lo = (mac_addr +
4069 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
4070 dmae->src_addr_hi = 0;
4071 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 4072 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 4073 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 4074 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea
ET
4075 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
4076 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4077 dmae->comp_addr_hi = 0;
4078 dmae->comp_val = 1;
4079 }
4080
4081 /* NIG */
bb2a0f7a
YG
4082 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4083 dmae->opcode = opcode;
4084 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
4085 NIG_REG_STAT0_BRB_DISCARD) >> 2;
4086 dmae->src_addr_hi = 0;
4087 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
4088 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
4089 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
4090 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4091 dmae->comp_addr_hi = 0;
4092 dmae->comp_val = 1;
4093
4094 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4095 dmae->opcode = opcode;
4096 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
4097 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
4098 dmae->src_addr_hi = 0;
4099 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4100 offsetof(struct nig_stats, egress_mac_pkt0_lo));
4101 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4102 offsetof(struct nig_stats, egress_mac_pkt0_lo));
4103 dmae->len = (2*sizeof(u32)) >> 2;
4104 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4105 dmae->comp_addr_hi = 0;
4106 dmae->comp_val = 1;
4107
a2fbb9ea
ET
4108 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4109 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4110 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4111 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4112#ifdef __BIG_ENDIAN
4113 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4114#else
4115 DMAE_CMD_ENDIANITY_DW_SWAP |
4116#endif
bb2a0f7a
YG
4117 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4118 (vn << DMAE_CMD_E1HVN_SHIFT));
4119 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
4120 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
a2fbb9ea 4121 dmae->src_addr_hi = 0;
bb2a0f7a
YG
4122 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4123 offsetof(struct nig_stats, egress_mac_pkt1_lo));
4124 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4125 offsetof(struct nig_stats, egress_mac_pkt1_lo));
4126 dmae->len = (2*sizeof(u32)) >> 2;
4127 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4128 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4129 dmae->comp_val = DMAE_COMP_VAL;
4130
4131 *stats_comp = 0;
a2fbb9ea
ET
4132}
4133
bb2a0f7a 4134static void bnx2x_func_stats_init(struct bnx2x *bp)
a2fbb9ea 4135{
bb2a0f7a
YG
4136 struct dmae_command *dmae = &bp->stats_dmae;
4137 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4138
bb2a0f7a
YG
4139 /* sanity */
4140 if (!bp->func_stx) {
4141 BNX2X_ERR("BUG!\n");
4142 return;
4143 }
a2fbb9ea 4144
bb2a0f7a
YG
4145 bp->executer_idx = 0;
4146 memset(dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 4147
bb2a0f7a
YG
4148 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4149 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4150 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4151#ifdef __BIG_ENDIAN
4152 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4153#else
4154 DMAE_CMD_ENDIANITY_DW_SWAP |
4155#endif
4156 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4157 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4158 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4159 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4160 dmae->dst_addr_lo = bp->func_stx >> 2;
4161 dmae->dst_addr_hi = 0;
4162 dmae->len = sizeof(struct host_func_stats) >> 2;
4163 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4164 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4165 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 4166
bb2a0f7a
YG
4167 *stats_comp = 0;
4168}
a2fbb9ea 4169
bb2a0f7a
YG
4170static void bnx2x_stats_start(struct bnx2x *bp)
4171{
4172 if (bp->port.pmf)
4173 bnx2x_port_stats_init(bp);
4174
4175 else if (bp->func_stx)
4176 bnx2x_func_stats_init(bp);
4177
4178 bnx2x_hw_stats_post(bp);
4179 bnx2x_storm_stats_post(bp);
4180}
4181
4182static void bnx2x_stats_pmf_start(struct bnx2x *bp)
4183{
4184 bnx2x_stats_comp(bp);
4185 bnx2x_stats_pmf_update(bp);
4186 bnx2x_stats_start(bp);
4187}
4188
4189static void bnx2x_stats_restart(struct bnx2x *bp)
4190{
4191 bnx2x_stats_comp(bp);
4192 bnx2x_stats_start(bp);
4193}
4194
4195static void bnx2x_bmac_stats_update(struct bnx2x *bp)
4196{
4197 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
4198 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 4199 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4781bfad
EG
4200 struct {
4201 u32 lo;
4202 u32 hi;
4203 } diff;
bb2a0f7a
YG
4204
4205 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
4206 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
4207 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
4208 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
4209 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
4210 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 4211 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
bb2a0f7a 4212 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
de832a55 4213 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
bb2a0f7a
YG
4214 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
4215 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
4216 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
4217 UPDATE_STAT64(tx_stat_gt127,
4218 tx_stat_etherstatspkts65octetsto127octets);
4219 UPDATE_STAT64(tx_stat_gt255,
4220 tx_stat_etherstatspkts128octetsto255octets);
4221 UPDATE_STAT64(tx_stat_gt511,
4222 tx_stat_etherstatspkts256octetsto511octets);
4223 UPDATE_STAT64(tx_stat_gt1023,
4224 tx_stat_etherstatspkts512octetsto1023octets);
4225 UPDATE_STAT64(tx_stat_gt1518,
4226 tx_stat_etherstatspkts1024octetsto1522octets);
4227 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
4228 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
4229 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
4230 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
4231 UPDATE_STAT64(tx_stat_gterr,
4232 tx_stat_dot3statsinternalmactransmiterrors);
4233 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
de832a55
EG
4234
4235 estats->pause_frames_received_hi =
4236 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
4237 estats->pause_frames_received_lo =
4238 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
4239
4240 estats->pause_frames_sent_hi =
4241 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
4242 estats->pause_frames_sent_lo =
4243 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
bb2a0f7a
YG
4244}
4245
4246static void bnx2x_emac_stats_update(struct bnx2x *bp)
4247{
4248 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
4249 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 4250 struct bnx2x_eth_stats *estats = &bp->eth_stats;
bb2a0f7a
YG
4251
4252 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
4253 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
4254 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
4255 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
4256 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
4257 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
4258 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
4259 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
4260 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
4261 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
4262 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
4263 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
4264 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
4265 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
4266 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
4267 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
4268 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
4269 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
4270 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
4271 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
4272 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
4273 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
4274 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
4275 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
4276 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
4277 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
4278 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
4279 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
4280 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
4281 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
4282 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
de832a55
EG
4283
4284 estats->pause_frames_received_hi =
4285 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
4286 estats->pause_frames_received_lo =
4287 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
4288 ADD_64(estats->pause_frames_received_hi,
4289 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
4290 estats->pause_frames_received_lo,
4291 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
4292
4293 estats->pause_frames_sent_hi =
4294 pstats->mac_stx[1].tx_stat_outxonsent_hi;
4295 estats->pause_frames_sent_lo =
4296 pstats->mac_stx[1].tx_stat_outxonsent_lo;
4297 ADD_64(estats->pause_frames_sent_hi,
4298 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
4299 estats->pause_frames_sent_lo,
4300 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
bb2a0f7a
YG
4301}
4302
4303static int bnx2x_hw_stats_update(struct bnx2x *bp)
4304{
4305 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
4306 struct nig_stats *old = &(bp->port.old_nig_stats);
4307 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4308 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4781bfad
EG
4309 struct {
4310 u32 lo;
4311 u32 hi;
4312 } diff;
bb2a0f7a
YG
4313
4314 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
4315 bnx2x_bmac_stats_update(bp);
4316
4317 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
4318 bnx2x_emac_stats_update(bp);
4319
4320 else { /* unreached */
c3eefaf6 4321 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
bb2a0f7a
YG
4322 return -1;
4323 }
a2fbb9ea 4324
bb2a0f7a
YG
4325 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
4326 new->brb_discard - old->brb_discard);
66e855f3
YG
4327 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
4328 new->brb_truncate - old->brb_truncate);
a2fbb9ea 4329
bb2a0f7a
YG
4330 UPDATE_STAT64_NIG(egress_mac_pkt0,
4331 etherstatspkts1024octetsto1522octets);
4332 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 4333
bb2a0f7a 4334 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 4335
bb2a0f7a
YG
4336 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
4337 sizeof(struct mac_stx));
4338 estats->brb_drop_hi = pstats->brb_drop_hi;
4339 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 4340
bb2a0f7a 4341 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 4342
2145a920
VZ
4343 if (!BP_NOMCP(bp)) {
4344 u32 nig_timer_max =
4345 SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
4346 if (nig_timer_max != estats->nig_timer_max) {
4347 estats->nig_timer_max = nig_timer_max;
4348 BNX2X_ERR("NIG timer max (%u)\n",
4349 estats->nig_timer_max);
4350 }
de832a55
EG
4351 }
4352
bb2a0f7a 4353 return 0;
a2fbb9ea
ET
4354}
4355
bb2a0f7a 4356static int bnx2x_storm_stats_update(struct bnx2x *bp)
a2fbb9ea
ET
4357{
4358 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
bb2a0f7a 4359 struct tstorm_per_port_stats *tport =
de832a55 4360 &stats->tstorm_common.port_statistics;
bb2a0f7a
YG
4361 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
4362 struct bnx2x_eth_stats *estats = &bp->eth_stats;
de832a55
EG
4363 int i;
4364
6fe49bb9
EG
4365 memcpy(&(fstats->total_bytes_received_hi),
4366 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
de832a55
EG
4367 sizeof(struct host_func_stats) - 2*sizeof(u32));
4368 estats->error_bytes_received_hi = 0;
4369 estats->error_bytes_received_lo = 0;
4370 estats->etherstatsoverrsizepkts_hi = 0;
4371 estats->etherstatsoverrsizepkts_lo = 0;
4372 estats->no_buff_discard_hi = 0;
4373 estats->no_buff_discard_lo = 0;
a2fbb9ea 4374
54b9ddaa 4375 for_each_queue(bp, i) {
de832a55
EG
4376 struct bnx2x_fastpath *fp = &bp->fp[i];
4377 int cl_id = fp->cl_id;
4378 struct tstorm_per_client_stats *tclient =
4379 &stats->tstorm_common.client_statistics[cl_id];
4380 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4381 struct ustorm_per_client_stats *uclient =
4382 &stats->ustorm_common.client_statistics[cl_id];
4383 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4384 struct xstorm_per_client_stats *xclient =
4385 &stats->xstorm_common.client_statistics[cl_id];
4386 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4387 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4388 u32 diff;
4389
4390 /* are storm stats valid? */
4391 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
bb2a0f7a 4392 bp->stats_counter) {
de832a55 4393 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm:"
cdaa7cb8 4394 " xstorm counter (0x%x) != stats_counter (0x%x)\n",
de832a55
EG
4395 i, xclient->stats_counter, bp->stats_counter);
4396 return -1;
4397 }
4398 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
bb2a0f7a 4399 bp->stats_counter) {
de832a55 4400 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm:"
cdaa7cb8 4401 " tstorm counter (0x%x) != stats_counter (0x%x)\n",
de832a55
EG
4402 i, tclient->stats_counter, bp->stats_counter);
4403 return -2;
4404 }
4405 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4406 bp->stats_counter) {
4407 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm:"
cdaa7cb8 4408 " ustorm counter (0x%x) != stats_counter (0x%x)\n",
de832a55
EG
4409 i, uclient->stats_counter, bp->stats_counter);
4410 return -4;
4411 }
a2fbb9ea 4412
de832a55 4413 qstats->total_bytes_received_hi =
ca00392c 4414 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
de832a55 4415 qstats->total_bytes_received_lo =
ca00392c
EG
4416 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4417
4418 ADD_64(qstats->total_bytes_received_hi,
4419 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4420 qstats->total_bytes_received_lo,
4421 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4422
4423 ADD_64(qstats->total_bytes_received_hi,
4424 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4425 qstats->total_bytes_received_lo,
4426 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4427
4428 qstats->valid_bytes_received_hi =
4429 qstats->total_bytes_received_hi;
de832a55 4430 qstats->valid_bytes_received_lo =
ca00392c 4431 qstats->total_bytes_received_lo;
bb2a0f7a 4432
de832a55 4433 qstats->error_bytes_received_hi =
bb2a0f7a 4434 le32_to_cpu(tclient->rcv_error_bytes.hi);
de832a55 4435 qstats->error_bytes_received_lo =
bb2a0f7a 4436 le32_to_cpu(tclient->rcv_error_bytes.lo);
bb2a0f7a 4437
de832a55
EG
4438 ADD_64(qstats->total_bytes_received_hi,
4439 qstats->error_bytes_received_hi,
4440 qstats->total_bytes_received_lo,
4441 qstats->error_bytes_received_lo);
4442
4443 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4444 total_unicast_packets_received);
4445 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4446 total_multicast_packets_received);
4447 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4448 total_broadcast_packets_received);
4449 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4450 etherstatsoverrsizepkts);
4451 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4452
4453 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4454 total_unicast_packets_received);
4455 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4456 total_multicast_packets_received);
4457 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4458 total_broadcast_packets_received);
4459 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4460 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4461 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4462
4463 qstats->total_bytes_transmitted_hi =
ca00392c 4464 le32_to_cpu(xclient->unicast_bytes_sent.hi);
de832a55 4465 qstats->total_bytes_transmitted_lo =
ca00392c
EG
4466 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4467
4468 ADD_64(qstats->total_bytes_transmitted_hi,
4469 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4470 qstats->total_bytes_transmitted_lo,
4471 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4472
4473 ADD_64(qstats->total_bytes_transmitted_hi,
4474 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4475 qstats->total_bytes_transmitted_lo,
4476 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
bb2a0f7a 4477
de832a55
EG
4478 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4479 total_unicast_packets_transmitted);
4480 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4481 total_multicast_packets_transmitted);
4482 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4483 total_broadcast_packets_transmitted);
4484
4485 old_tclient->checksum_discard = tclient->checksum_discard;
4486 old_tclient->ttl0_discard = tclient->ttl0_discard;
4487
4488 ADD_64(fstats->total_bytes_received_hi,
4489 qstats->total_bytes_received_hi,
4490 fstats->total_bytes_received_lo,
4491 qstats->total_bytes_received_lo);
4492 ADD_64(fstats->total_bytes_transmitted_hi,
4493 qstats->total_bytes_transmitted_hi,
4494 fstats->total_bytes_transmitted_lo,
4495 qstats->total_bytes_transmitted_lo);
4496 ADD_64(fstats->total_unicast_packets_received_hi,
4497 qstats->total_unicast_packets_received_hi,
4498 fstats->total_unicast_packets_received_lo,
4499 qstats->total_unicast_packets_received_lo);
4500 ADD_64(fstats->total_multicast_packets_received_hi,
4501 qstats->total_multicast_packets_received_hi,
4502 fstats->total_multicast_packets_received_lo,
4503 qstats->total_multicast_packets_received_lo);
4504 ADD_64(fstats->total_broadcast_packets_received_hi,
4505 qstats->total_broadcast_packets_received_hi,
4506 fstats->total_broadcast_packets_received_lo,
4507 qstats->total_broadcast_packets_received_lo);
4508 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4509 qstats->total_unicast_packets_transmitted_hi,
4510 fstats->total_unicast_packets_transmitted_lo,
4511 qstats->total_unicast_packets_transmitted_lo);
4512 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4513 qstats->total_multicast_packets_transmitted_hi,
4514 fstats->total_multicast_packets_transmitted_lo,
4515 qstats->total_multicast_packets_transmitted_lo);
4516 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4517 qstats->total_broadcast_packets_transmitted_hi,
4518 fstats->total_broadcast_packets_transmitted_lo,
4519 qstats->total_broadcast_packets_transmitted_lo);
4520 ADD_64(fstats->valid_bytes_received_hi,
4521 qstats->valid_bytes_received_hi,
4522 fstats->valid_bytes_received_lo,
4523 qstats->valid_bytes_received_lo);
4524
4525 ADD_64(estats->error_bytes_received_hi,
4526 qstats->error_bytes_received_hi,
4527 estats->error_bytes_received_lo,
4528 qstats->error_bytes_received_lo);
4529 ADD_64(estats->etherstatsoverrsizepkts_hi,
4530 qstats->etherstatsoverrsizepkts_hi,
4531 estats->etherstatsoverrsizepkts_lo,
4532 qstats->etherstatsoverrsizepkts_lo);
4533 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4534 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4535 }
4536
4537 ADD_64(fstats->total_bytes_received_hi,
4538 estats->rx_stat_ifhcinbadoctets_hi,
4539 fstats->total_bytes_received_lo,
4540 estats->rx_stat_ifhcinbadoctets_lo);
bb2a0f7a
YG
4541
4542 memcpy(estats, &(fstats->total_bytes_received_hi),
4543 sizeof(struct host_func_stats) - 2*sizeof(u32));
4544
de832a55
EG
4545 ADD_64(estats->etherstatsoverrsizepkts_hi,
4546 estats->rx_stat_dot3statsframestoolong_hi,
4547 estats->etherstatsoverrsizepkts_lo,
4548 estats->rx_stat_dot3statsframestoolong_lo);
4549 ADD_64(estats->error_bytes_received_hi,
4550 estats->rx_stat_ifhcinbadoctets_hi,
4551 estats->error_bytes_received_lo,
4552 estats->rx_stat_ifhcinbadoctets_lo);
4553
4554 if (bp->port.pmf) {
4555 estats->mac_filter_discard =
4556 le32_to_cpu(tport->mac_filter_discard);
4557 estats->xxoverflow_discard =
4558 le32_to_cpu(tport->xxoverflow_discard);
4559 estats->brb_truncate_discard =
bb2a0f7a 4560 le32_to_cpu(tport->brb_truncate_discard);
de832a55
EG
4561 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4562 }
bb2a0f7a
YG
4563
4564 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
a2fbb9ea 4565
de832a55
EG
4566 bp->stats_pending = 0;
4567
a2fbb9ea
ET
4568 return 0;
4569}
4570
bb2a0f7a 4571static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 4572{
bb2a0f7a 4573 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 4574 struct net_device_stats *nstats = &bp->dev->stats;
de832a55 4575 int i;
a2fbb9ea
ET
4576
4577 nstats->rx_packets =
4578 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4579 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4580 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4581
4582 nstats->tx_packets =
4583 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4584 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4585 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4586
de832a55 4587 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
a2fbb9ea 4588
0e39e645 4589 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 4590
de832a55 4591 nstats->rx_dropped = estats->mac_discard;
54b9ddaa 4592 for_each_queue(bp, i)
de832a55
EG
4593 nstats->rx_dropped +=
4594 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4595
a2fbb9ea
ET
4596 nstats->tx_dropped = 0;
4597
4598 nstats->multicast =
de832a55 4599 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
a2fbb9ea 4600
bb2a0f7a 4601 nstats->collisions =
de832a55 4602 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
bb2a0f7a
YG
4603
4604 nstats->rx_length_errors =
de832a55
EG
4605 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4606 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4607 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4608 bnx2x_hilo(&estats->brb_truncate_hi);
4609 nstats->rx_crc_errors =
4610 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4611 nstats->rx_frame_errors =
4612 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4613 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
a2fbb9ea
ET
4614 nstats->rx_missed_errors = estats->xxoverflow_discard;
4615
4616 nstats->rx_errors = nstats->rx_length_errors +
4617 nstats->rx_over_errors +
4618 nstats->rx_crc_errors +
4619 nstats->rx_frame_errors +
0e39e645
ET
4620 nstats->rx_fifo_errors +
4621 nstats->rx_missed_errors;
a2fbb9ea 4622
bb2a0f7a 4623 nstats->tx_aborted_errors =
de832a55
EG
4624 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4625 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4626 nstats->tx_carrier_errors =
4627 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
a2fbb9ea
ET
4628 nstats->tx_fifo_errors = 0;
4629 nstats->tx_heartbeat_errors = 0;
4630 nstats->tx_window_errors = 0;
4631
4632 nstats->tx_errors = nstats->tx_aborted_errors +
de832a55
EG
4633 nstats->tx_carrier_errors +
4634 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4635}
4636
4637static void bnx2x_drv_stats_update(struct bnx2x *bp)
4638{
4639 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4640 int i;
4641
4642 estats->driver_xoff = 0;
4643 estats->rx_err_discard_pkt = 0;
4644 estats->rx_skb_alloc_failed = 0;
4645 estats->hw_csum_err = 0;
54b9ddaa 4646 for_each_queue(bp, i) {
de832a55
EG
4647 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4648
4649 estats->driver_xoff += qstats->driver_xoff;
4650 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4651 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4652 estats->hw_csum_err += qstats->hw_csum_err;
4653 }
a2fbb9ea
ET
4654}
4655
bb2a0f7a 4656static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 4657{
bb2a0f7a 4658 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4659
bb2a0f7a
YG
4660 if (*stats_comp != DMAE_COMP_VAL)
4661 return;
4662
4663 if (bp->port.pmf)
de832a55 4664 bnx2x_hw_stats_update(bp);
a2fbb9ea 4665
de832a55
EG
4666 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4667 BNX2X_ERR("storm stats were not updated for 3 times\n");
4668 bnx2x_panic();
4669 return;
a2fbb9ea
ET
4670 }
4671
de832a55
EG
4672 bnx2x_net_stats_update(bp);
4673 bnx2x_drv_stats_update(bp);
4674
7995c64e 4675 if (netif_msg_timer(bp)) {
ca00392c 4676 struct bnx2x_fastpath *fp0_rx = bp->fp;
54b9ddaa 4677 struct bnx2x_fastpath *fp0_tx = bp->fp;
de832a55
EG
4678 struct tstorm_per_client_stats *old_tclient =
4679 &bp->fp->old_tclient;
4680 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
bb2a0f7a 4681 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 4682 struct net_device_stats *nstats = &bp->dev->stats;
34f80b04 4683 int i;
a2fbb9ea 4684
7995c64e 4685 netdev_printk(KERN_DEBUG, bp->dev, "\n");
a2fbb9ea
ET
4686 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4687 " tx pkt (%lx)\n",
ca00392c
EG
4688 bnx2x_tx_avail(fp0_tx),
4689 le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
a2fbb9ea
ET
4690 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4691 " rx pkt (%lx)\n",
ca00392c
EG
4692 (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4693 fp0_rx->rx_comp_cons),
4694 le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
de832a55
EG
4695 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4696 "brb truncate %u\n",
4697 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4698 qstats->driver_xoff,
4699 estats->brb_drop_lo, estats->brb_truncate_lo);
a2fbb9ea 4700 printk(KERN_DEBUG "tstats: checksum_discard %u "
de832a55 4701 "packets_too_big_discard %lu no_buff_discard %lu "
a2fbb9ea
ET
4702 "mac_discard %u mac_filter_discard %u "
4703 "xxovrflow_discard %u brb_truncate_discard %u "
4704 "ttl0_discard %u\n",
4781bfad 4705 le32_to_cpu(old_tclient->checksum_discard),
de832a55
EG
4706 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4707 bnx2x_hilo(&qstats->no_buff_discard_hi),
4708 estats->mac_discard, estats->mac_filter_discard,
4709 estats->xxoverflow_discard, estats->brb_truncate_discard,
4781bfad 4710 le32_to_cpu(old_tclient->ttl0_discard));
a2fbb9ea
ET
4711
4712 for_each_queue(bp, i) {
4713 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4714 bnx2x_fp(bp, i, tx_pkt),
4715 bnx2x_fp(bp, i, rx_pkt),
4716 bnx2x_fp(bp, i, rx_calls));
4717 }
4718 }
4719
bb2a0f7a
YG
4720 bnx2x_hw_stats_post(bp);
4721 bnx2x_storm_stats_post(bp);
4722}
a2fbb9ea 4723
bb2a0f7a
YG
4724static void bnx2x_port_stats_stop(struct bnx2x *bp)
4725{
4726 struct dmae_command *dmae;
4727 u32 opcode;
4728 int loader_idx = PMF_DMAE_C(bp);
4729 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4730
bb2a0f7a 4731 bp->executer_idx = 0;
a2fbb9ea 4732
bb2a0f7a
YG
4733 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4734 DMAE_CMD_C_ENABLE |
4735 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 4736#ifdef __BIG_ENDIAN
bb2a0f7a 4737 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 4738#else
bb2a0f7a 4739 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 4740#endif
bb2a0f7a
YG
4741 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4742 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4743
4744 if (bp->port.port_stx) {
4745
4746 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4747 if (bp->func_stx)
4748 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4749 else
4750 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4751 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4752 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4753 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 4754 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
4755 dmae->len = sizeof(struct host_port_stats) >> 2;
4756 if (bp->func_stx) {
4757 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4758 dmae->comp_addr_hi = 0;
4759 dmae->comp_val = 1;
4760 } else {
4761 dmae->comp_addr_lo =
4762 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4763 dmae->comp_addr_hi =
4764 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4765 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 4766
bb2a0f7a
YG
4767 *stats_comp = 0;
4768 }
a2fbb9ea
ET
4769 }
4770
bb2a0f7a
YG
4771 if (bp->func_stx) {
4772
4773 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4774 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4775 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4776 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4777 dmae->dst_addr_lo = bp->func_stx >> 2;
4778 dmae->dst_addr_hi = 0;
4779 dmae->len = sizeof(struct host_func_stats) >> 2;
4780 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4781 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4782 dmae->comp_val = DMAE_COMP_VAL;
4783
4784 *stats_comp = 0;
a2fbb9ea 4785 }
bb2a0f7a
YG
4786}
4787
4788static void bnx2x_stats_stop(struct bnx2x *bp)
4789{
4790 int update = 0;
4791
4792 bnx2x_stats_comp(bp);
4793
4794 if (bp->port.pmf)
4795 update = (bnx2x_hw_stats_update(bp) == 0);
4796
4797 update |= (bnx2x_storm_stats_update(bp) == 0);
4798
4799 if (update) {
4800 bnx2x_net_stats_update(bp);
a2fbb9ea 4801
bb2a0f7a
YG
4802 if (bp->port.pmf)
4803 bnx2x_port_stats_stop(bp);
4804
4805 bnx2x_hw_stats_post(bp);
4806 bnx2x_stats_comp(bp);
a2fbb9ea
ET
4807 }
4808}
4809
bb2a0f7a
YG
4810static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4811{
4812}
4813
4814static const struct {
4815 void (*action)(struct bnx2x *bp);
4816 enum bnx2x_stats_state next_state;
4817} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4818/* state event */
4819{
4820/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4821/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4822/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4823/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4824},
4825{
4826/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4827/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4828/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4829/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4830}
4831};
4832
4833static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4834{
4835 enum bnx2x_stats_state state = bp->stats_state;
4836
cdaa7cb8
VZ
4837 if (unlikely(bp->panic))
4838 return;
4839
bb2a0f7a
YG
4840 bnx2x_stats_stm[state][event].action(bp);
4841 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4842
8924665a
EG
4843 /* Make sure the state has been "changed" */
4844 smp_wmb();
4845
7995c64e 4846 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
bb2a0f7a
YG
4847 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4848 state, event, bp->stats_state);
4849}
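
/* A minimal stand-alone sketch of the [state][event] dispatch above;
 * all names are illustrative, not the driver's.  Each table entry
 * packs the action to run and the state to move to, so the handler
 * body stays two lines no matter how many states are added.
 */
enum my_state { MY_OFF, MY_ON, MY_STATE_MAX };
enum my_event { MY_START, MY_STOP, MY_EVENT_MAX };

static void my_do_nothing(void) { }
static void my_do_start(void) { /* bring the block up */ }
static void my_do_stop(void) { /* take the block down */ }

static const struct {
	void (*action)(void);
	enum my_state next_state;
} my_stm[MY_STATE_MAX][MY_EVENT_MAX] = {
/* OFF */ { {my_do_start,   MY_ON}, {my_do_nothing, MY_OFF} },
/* ON  */ { {my_do_nothing, MY_ON}, {my_do_stop,    MY_OFF} },
};

static void my_handle(enum my_state *state, enum my_event event)
{
	my_stm[*state][event].action();
	*state = my_stm[*state][event].next_state;
}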
4850
6fe49bb9
EG
4851static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4852{
4853 struct dmae_command *dmae;
4854 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4855
4856 /* sanity */
4857 if (!bp->port.pmf || !bp->port.port_stx) {
4858 BNX2X_ERR("BUG!\n");
4859 return;
4860 }
4861
4862 bp->executer_idx = 0;
4863
4864 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4865 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4866 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4867 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4868#ifdef __BIG_ENDIAN
4869 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4870#else
4871 DMAE_CMD_ENDIANITY_DW_SWAP |
4872#endif
4873 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4874 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4875 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4876 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4877 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4878 dmae->dst_addr_hi = 0;
4879 dmae->len = sizeof(struct host_port_stats) >> 2;
4880 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4881 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4882 dmae->comp_val = DMAE_COMP_VAL;
4883
4884 *stats_comp = 0;
4885 bnx2x_hw_stats_post(bp);
4886 bnx2x_stats_comp(bp);
4887}
4888
4889static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4890{
4891 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4892 int port = BP_PORT(bp);
4893 int func;
4894 u32 func_stx;
4895
4896 /* sanity */
4897 if (!bp->port.pmf || !bp->func_stx) {
4898 BNX2X_ERR("BUG!\n");
4899 return;
4900 }
4901
4902 /* save our func_stx */
4903 func_stx = bp->func_stx;
4904
4905 for (vn = VN_0; vn < vn_max; vn++) {
4906 func = 2*vn + port;
4907
4908 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4909 bnx2x_func_stats_init(bp);
4910 bnx2x_hw_stats_post(bp);
4911 bnx2x_stats_comp(bp);
4912 }
4913
4914 /* restore our func_stx */
4915 bp->func_stx = func_stx;
4916}
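
/* Worked example of the function numbering in the loop above: with
 * func = 2*vn + port, a PMF on port 1 visits functions 1, 3, 5, ...
 * as vn counts up - that is, every function sharing its port - and
 * initializes each one's statistics area in turn.
 */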
4917
4918static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4919{
4920 struct dmae_command *dmae = &bp->stats_dmae;
4921 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4922
4923 /* sanity */
4924 if (!bp->func_stx) {
4925 BNX2X_ERR("BUG!\n");
4926 return;
4927 }
4928
4929 bp->executer_idx = 0;
4930 memset(dmae, 0, sizeof(struct dmae_command));
4931
4932 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4933 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4934 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4935#ifdef __BIG_ENDIAN
4936 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4937#else
4938 DMAE_CMD_ENDIANITY_DW_SWAP |
4939#endif
4940 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4941 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4942 dmae->src_addr_lo = bp->func_stx >> 2;
4943 dmae->src_addr_hi = 0;
4944 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4945 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4946 dmae->len = sizeof(struct host_func_stats) >> 2;
4947 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4948 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4949 dmae->comp_val = DMAE_COMP_VAL;
4950
4951 *stats_comp = 0;
4952 bnx2x_hw_stats_post(bp);
4953 bnx2x_stats_comp(bp);
4954}
4955
4956static void bnx2x_stats_init(struct bnx2x *bp)
4957{
4958 int port = BP_PORT(bp);
4959 int func = BP_FUNC(bp);
4960 int i;
4961
4962 bp->stats_pending = 0;
4963 bp->executer_idx = 0;
4964 bp->stats_counter = 0;
4965
4966 /* port and func stats for management */
4967 if (!BP_NOMCP(bp)) {
4968 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4969 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4970
4971 } else {
4972 bp->port.port_stx = 0;
4973 bp->func_stx = 0;
4974 }
4975 DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
4976 bp->port.port_stx, bp->func_stx);
4977
4978 /* port stats */
4979 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4980 bp->port.old_nig_stats.brb_discard =
4981 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4982 bp->port.old_nig_stats.brb_truncate =
4983 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4984 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4985 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4986 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4987 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4988
4989 /* function stats */
4990 for_each_queue(bp, i) {
4991 struct bnx2x_fastpath *fp = &bp->fp[i];
4992
4993 memset(&fp->old_tclient, 0,
4994 sizeof(struct tstorm_per_client_stats));
4995 memset(&fp->old_uclient, 0,
4996 sizeof(struct ustorm_per_client_stats));
4997 memset(&fp->old_xclient, 0,
4998 sizeof(struct xstorm_per_client_stats));
4999 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
5000 }
5001
5002 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
5003 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
5004
5005 bp->stats_state = STATS_STATE_DISABLED;
5006
5007 if (bp->port.pmf) {
5008 if (bp->port.port_stx)
5009 bnx2x_port_stats_base_init(bp);
5010
5011 if (bp->func_stx)
5012 bnx2x_func_stats_base_init(bp);
5013
5014 } else if (bp->func_stx)
5015 bnx2x_func_stats_base_update(bp);
5016}
5017
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

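/*
 * Editor's note: illustrative helper (not part of the driver) expressing
 * the heartbeat rule checked above: the driver sequence may lead the MCP
 * echo by at most one, modulo the pulse sequence mask.
 */
static inline int bnx2x_pulse_in_sync_sketch(u32 drv_pulse, u32 mcp_pulse)
{
	return (drv_pulse == mcp_pulse) ||
	       (drv_pulse == ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK));
}
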
/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	/* "CSTORM" */
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
}

static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_u)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_c)/4);
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}

static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						      reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
						      U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/(4 * BNX2X_BTR));
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
						       U_SB_ETH_RX_CQ_INDEX),
			 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
						      C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/(4 * BNX2X_BTR));
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
						       C_SB_ETH_TX_CQ_INDEX),
			 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
	}
}

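/*
 * Editor's note: illustrative sketch, under the assumption that rx_ticks
 * and tx_ticks are interrupt-coalescing intervals in microseconds. The
 * timeout registers above are programmed in hardware tick units of
 * 4 * BNX2X_BTR, and the matching HC_DISABLE flag is set whenever the
 * result is zero, i.e. coalescing is effectively off for that index.
 */
static inline u8 bnx2x_usec_to_hc_ticks_sketch(u16 usec)
{
	return usec / (4 * BNX2X_BTR);	/* 0 means: disable coalescing */
}
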
static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, DMA_FROM_DEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}

static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod, cqe_ring_prod;
	int i, j;

	bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
	DP(NETIF_MSG_IFUP,
	   "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);

	if (bp->flags & TPA_ENABLE_FLAG) {

		for_each_queue(bp, j) {
			struct bnx2x_fastpath *fp = &bp->fp[j];

			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				dma_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs on queue[%d]\n", i, j);
				fp->eth_q_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
					 cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * This will generate an interrupt (to the TSTORM), so it
		 * must only be done after the chip is initialized.
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}

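/*
 * Editor's note: in the ring setup above, the last two BD slots of every
 * page (hence the "RX_DESC_CNT * i - 2" index) are not used for buffers;
 * they hold the "next page" pointer that chains the pages into one
 * logical ring. The completion (RCQ) ring reserves a single slot
 * ("RCQ_DESC_CNT * i - 1") for the same purpose.
 */
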
static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_next_bd *tx_next_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

			tx_next_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_next_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
		fp->tx_db.data.zero_fill1 = 0;
		fp->tx_db.data.prod = 0;

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}

static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	/* Rx */
	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = fp->sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
					   0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);

			context->ustorm_st_context.common.max_sges_for_packet =
				SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
			context->ustorm_st_context.common.max_sges_for_packet =
				((context->ustorm_st_context.common.
				  max_sges_for_packet + PAGES_PER_SGE - 1) &
				 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
		}

		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_context *context =
			bnx2x_sp(bp, context[i].eth);

		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = fp->sb_id;

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.statistics_data = (fp->cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
	}
}

static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_queues));
}

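/*
 * Editor's note: illustrative example. With num_queues == 4 and a leading
 * client id of 0, the loop above fills the TSTORM_INDIRECTION_TABLE_SIZE
 * entries round-robin: 0, 1, 2, 3, 0, 1, 2, 3, ... so RSS hash buckets
 * are spread evenly across the Rx queues.
 */
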
static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}

static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = bp->rx_mode_cl_mask;
	int func = BP_FUNC(bp);
	int port = BP_PORT(bp);
	int i;
	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	REG_WR(bp,
	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
	       llh_mask);

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}

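/*
 * Editor's note: summary of the mode-to-filter mapping implemented above:
 *
 *	NONE     - drop all unicast, multicast and broadcast
 *	NORMAL   - accept broadcast; unicast/multicast go through the
 *		   regular MAC filters
 *	ALLMULTI - accept all multicast and broadcast
 *	PROMISC  - accept everything, including management unicast (LLH)
 */
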
static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i, j;
	u32 offset;
	u16 max_agg_size;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS(bp);
		tstorm_config.rss_result_mask = MULTI_MASK;
	}

	/* Enable TPA if needed */
	if (bp->flags & TPA_ENABLE_FLAG)
		tstorm_config.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
	bnx2x_set_storm_rx_mode(bp);

	for_each_queue(bp, i) {
		u8 cl_id = bp->fp[i].cl_id;

		/* reset xstorm per client statistics */
		offset = BAR_XSTRORM_INTMEM +
			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset tstorm per client statistics */
		offset = BAR_TSTRORM_INTMEM +
			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset ustorm per client statistics */
		offset = BAR_USTRORM_INTMEM +
			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
				   SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping));

		/* Next page */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
			 max_agg_size);
	}

	/* dropless flow control */
	if (CHIP_IS_E1H(bp)) {
		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.cos = 1;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for_each_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];

			if (!fp->disable_tpa) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;
			}

			offset = BAR_USTRORM_INTMEM +
				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
								   fp->cl_id);
			for (j = 0;
			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
			     j++)
				REG_WR(bp, offset + j*4,
				       ((u32 *)&rx_pause)[j]);
		}
	}

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Init rate shaping and fairness contexts */
	if (IS_E1HMF(bp)) {
		int vn;

		/* During init there is no active link;
		   until link is up, set the link rate to 10Gbps */
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);

		if (!BP_NOMCP(bp))
			bp->mf_config =
			      SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		bnx2x_calc_vn_weight_sum(bp);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);

		/* Enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;

	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode minmax will be disabled\n");
	}

	/* Store cmng structures to internal memory */
	if (bp->port.pmf)
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}

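/*
 * Editor's note (assumption, not a specification): the rx_pause thresholds
 * programmed above appear to form a hysteresis band per Rx resource (BDs,
 * CQEs, SGEs) for dropless flow control - pause is asserted when free
 * entries fall below *_thr_low and released once they rise above
 * *_thr_high. Treat this as a sketch of the intent.
 */
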
static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}

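/*
 * Editor's note: the switch above relies on deliberate fall-through (the
 * "no break" comments), so each load code initializes its own scope plus
 * everything below it:
 *
 *	COMMON   -> common(), port(), func()
 *	PORT     ->           port(), func()
 *	FUNCTION ->                   func()
 */
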
static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
#ifdef BCM_CNIC
		fp->sb_id = fp->cl_id + 1;
#else
		fp->sb_id = fp->cl_id;
#endif
		DP(NETIF_MSG_IFUP,
		   "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	/* ensure status block indices were read */
	rmb();

	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}

/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
					    &bp->gunzip_mapping, GFP_KERNEL);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
			  bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	netdev_err(bp->dev, "Cannot allocate firmware buffer for"
		   " decompression\n");
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
				  bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	n = 10;

#define FNAME	0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		netdev_err(bp->dev, "Firmware decompression error: %s\n",
			   bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		netdev_err(bp->dev, "Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
			   bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}

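/*
 * Editor's note: illustrative check mirroring the header test above. A
 * gzip stream (RFC 1952) starts with the two magic bytes 0x1f 0x8b, a
 * method byte (8 = deflate) and a flags byte within a 10-byte fixed
 * header; if the FNAME flag is set, a NUL-terminated file name follows
 * before the deflate data - which is why n is advanced past it above.
 */
static inline int gzip_header_ok_sketch(const u8 *z, int len)
{
	return (len > 10) && (z[0] == 0x1f) && (z[1] == 0x8b) &&
	       (z[2] == Z_DEFLATED);
}
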
/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

/* some of the internal memories
 * are not directly readable from the driver;
 * to test them we send debug packets
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO: do we need to reset the NIG statistics here? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}

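/*
 * Editor's note: summary of the self-test above. Stage 1 sends one
 * loopback packet and polls the NIG byte counter (0x10) and the PRS
 * packet counter (1). Stage 2 resets BRB/PRS, sends ten packets with the
 * CFC search credits held at zero, then releases one credit and verifies
 * the parser drains as expected before re-enabling the neighbor blocks.
 */
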
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bit 3,4 masked */
}

static const struct {
	u32 addr;
	u32 mask;
} bnx2x_parity_mask[] = {
	{PXP_REG_PXP_PRTY_MASK, 0xffffffff},
	{PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
	{PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
	{HC_REG_HC_PRTY_MASK, 0xffffffff},
	{MISC_REG_MISC_PRTY_MASK, 0xffffffff},
	{QM_REG_QM_PRTY_MASK, 0x0},
	{DORQ_REG_DORQ_PRTY_MASK, 0x0},
	{GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
	{GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
	{SRC_REG_SRC_PRTY_MASK, 0x4},		/* bit 2 */
	{CDU_REG_CDU_PRTY_MASK, 0x0},
	{CFC_REG_CFC_PRTY_MASK, 0x0},
	{DBG_REG_DBG_PRTY_MASK, 0x0},
	{DMAE_REG_DMAE_PRTY_MASK, 0x0},
	{BRB1_REG_BRB1_PRTY_MASK, 0x0},
	{PRS_REG_PRS_PRTY_MASK, (1<<6)},	/* bit 6 */
	{TSDM_REG_TSDM_PRTY_MASK, 0x18},	/* bit 3,4 */
	{CSDM_REG_CSDM_PRTY_MASK, 0x8},		/* bit 3 */
	{USDM_REG_USDM_PRTY_MASK, 0x38},	/* bit 3,4,5 */
	{XSDM_REG_XSDM_PRTY_MASK, 0x8},		/* bit 3 */
	{TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
	{TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
	{USEM_REG_USEM_PRTY_MASK_0, 0x0},
	{USEM_REG_USEM_PRTY_MASK_1, 0x0},
	{CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
	{CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
	{XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
	{XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
};

static void enable_blocks_parity(struct bnx2x *bp)
{
	int i, mask_arr_len =
		sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0]));

	for (i = 0; i < mask_arr_len; i++)
		REG_WR(bp, bnx2x_parity_mask[i].addr,
		       bnx2x_parity_mask[i].mask);
}

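/*
 * Editor's note: the open-coded sizeof division above is the standard
 * ARRAY_SIZE() pattern; an equivalent, arguably clearer form would be:
 *
 *	for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
 *		REG_WR(bp, bnx2x_parity_mask[i].addr,
 *		       bnx2x_parity_mask[i].mask);
 */
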
static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}

static void bnx2x_init_pxp(struct bnx2x *bp)
{
	u16 devctl;
	int r_order, w_order;

	pci_read_config_word(bp->pdev,
			     bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
	w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
	if (bp->mrrs == -1)
		r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
	else {
		DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
		r_order = bp->mrrs;
	}

	bnx2x_init_pxp_arb(bp, r_order, w_order);
}

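/*
 * Editor's note: illustrative decoding (assumes the standard PCIe DevCtl
 * encoding). Both the Max_Payload_Size and Max_Read_Request_Size fields
 * extracted above are 3-bit "orders" where the size in bytes is
 * 128 << order, e.g. an order of 2 means 512-byte requests.
 */
static inline int pcie_order_to_bytes_sketch(int order)
{
	return 128 << order;	/* order 0..5 -> 128..4096 bytes */
}
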
static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	int is_required;
	u32 val;
	int port;

	if (BP_NOMCP(bp))
		return;

	is_required = 0;
	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			u32 phy_type =
				SHMEM_RD(bp, dev_info.port_hw_config[port].
					 external_phy_config) &
				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			is_required |=
				((phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}

34f80b04 6463static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 6464{
a2fbb9ea 6465 u32 val, i;
37b091ba
MC
6466#ifdef BCM_CNIC
6467 u32 wb_write[2];
6468#endif
a2fbb9ea 6469
34f80b04 6470 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 6471
81f75bbf 6472 bnx2x_reset_common(bp);
34f80b04
EG
6473 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6474 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 6475
94a78b79 6476 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
34f80b04
EG
6477 if (CHIP_IS_E1H(bp))
6478 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 6479
34f80b04
EG
6480 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6481 msleep(30);
6482 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 6483
94a78b79 6484 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
34f80b04
EG
6485 if (CHIP_IS_E1(bp)) {
6486 /* enable HW interrupt from PXP on USDM overflow
6487 bit 16 on INT_MASK_0 */
6488 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6489 }
a2fbb9ea 6490
94a78b79 6491 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
34f80b04 6492 bnx2x_init_pxp(bp);
a2fbb9ea
ET
6493
6494#ifdef __BIG_ENDIAN
34f80b04
EG
6495 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6496 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6497 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6498 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6499 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
6500 /* make sure this value is 0 */
6501 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
6502
6503/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6504 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6505 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6506 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6507 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
6508#endif
6509
34f80b04 6510 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
37b091ba 6511#ifdef BCM_CNIC
34f80b04
EG
6512 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6513 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6514 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
a2fbb9ea
ET
6515#endif
6516
34f80b04
EG
6517 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6518 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 6519
34f80b04
EG
6520 /* let the HW do it's magic ... */
6521 msleep(100);
6522 /* finish PXP init */
6523 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6524 if (val != 1) {
6525 BNX2X_ERR("PXP2 CFG failed\n");
6526 return -EBUSY;
6527 }
6528 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6529 if (val != 1) {
6530 BNX2X_ERR("PXP2 RD_INIT failed\n");
6531 return -EBUSY;
6532 }
a2fbb9ea 6533
34f80b04
EG
6534 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6535 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 6536
94a78b79 6537 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
a2fbb9ea 6538
34f80b04
EG
6539 /* clean the DMAE memory */
6540 bp->dmae_ready = 1;
6541 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 6542
94a78b79
VZ
6543 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6544 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6545 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6546 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
a2fbb9ea 6547
34f80b04
EG
6548 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6549 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6550 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6551 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6552
94a78b79 6553 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
37b091ba
MC
6554
6555#ifdef BCM_CNIC
6556 wb_write[0] = 0;
6557 wb_write[1] = 0;
6558 for (i = 0; i < 64; i++) {
6559 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6560 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6561
6562 if (CHIP_IS_E1H(bp)) {
6563 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6564 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6565 wb_write, 2);
6566 }
6567 }
6568#endif
34f80b04
EG
6569 /* soft reset pulse */
6570 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6571 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea 6572
37b091ba 6573#ifdef BCM_CNIC
94a78b79 6574 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
a2fbb9ea 6575#endif
a2fbb9ea 6576
94a78b79 6577 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
34f80b04
EG
6578 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6579 if (!CHIP_REV_IS_SLOW(bp)) {
6580 /* enable hw interrupt from doorbell Q */
6581 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6582 }
a2fbb9ea 6583
94a78b79
VZ
6584 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6585 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
26c8fa4d 6586 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
37b091ba 6587#ifndef BCM_CNIC
3196a88a
EG
6588 /* set NIC mode */
6589 REG_WR(bp, PRS_REG_NIC_MODE, 1);
37b091ba 6590#endif
34f80b04
EG
6591 if (CHIP_IS_E1H(bp))
6592 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 6593
94a78b79
VZ
6594 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6595 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6596 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6597 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
a2fbb9ea 6598
ca00392c
EG
6599 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6600 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6601 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6602 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
a2fbb9ea 6603
94a78b79
VZ
6604 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6605 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6606 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6607 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
a2fbb9ea 6608
34f80b04
EG
6609 /* sync semi rtc */
6610 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6611 0x80000000);
6612 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6613 0x80000000);
a2fbb9ea 6614
94a78b79
VZ
6615 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6616 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6617 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
a2fbb9ea 6618
34f80b04
EG
6619 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6620 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6621 REG_WR(bp, i, 0xc0cac01a);
6622 /* TODO: replace with something meaningful */
6623 }
94a78b79 6624 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
37b091ba
MC
6625#ifdef BCM_CNIC
6626 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6627 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6628 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6629 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6630 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6631 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6632 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6633 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6634 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6635 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6636#endif
34f80b04 6637 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 6638
34f80b04
EG
6639 if (sizeof(union cdu_context) != 1024)
6640 /* we currently assume that a context is 1024 bytes */
cdaa7cb8
VZ
6641 dev_alert(&bp->pdev->dev, "please adjust the size "
6642 "of cdu_context(%ld)\n",
7995c64e 6643 (long)sizeof(union cdu_context));
a2fbb9ea 6644
94a78b79 6645 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
34f80b04
EG
6646 val = (4 << 24) + (0 << 12) + 1024;
6647 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
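 /* the 1024 in the low bits matches the 1024-byte cdu_context size
  * checked above; the remaining fields presumably select the CDU
  * type/block configuration */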
a2fbb9ea 6648
94a78b79 6649 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
34f80b04 6650 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
8d9c5f34
EG
6651 /* enable context validation interrupt from CFC */
6652 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6653
6654 /* set the thresholds to prevent CFC/CDU race */
6655 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 6656
94a78b79
VZ
6657 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6658 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
a2fbb9ea 6659
94a78b79 6660 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
34f80b04
EG
6661 /* Reset PCIE errors for debug */
6662 REG_WR(bp, 0x2814, 0xffffffff);
6663 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 6664
94a78b79 6665 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
94a78b79 6666 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
94a78b79 6667 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
94a78b79 6668 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
34f80b04 6669
94a78b79 6670 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
34f80b04
EG
6671 if (CHIP_IS_E1H(bp)) {
6672 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6673 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6674 }
6675
6676 if (CHIP_REV_IS_SLOW(bp))
6677 msleep(200);
6678
6679 /* finish CFC init */
6680 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6681 if (val != 1) {
6682 BNX2X_ERR("CFC LL_INIT failed\n");
6683 return -EBUSY;
6684 }
6685 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6686 if (val != 1) {
6687 BNX2X_ERR("CFC AC_INIT failed\n");
6688 return -EBUSY;
6689 }
6690 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6691 if (val != 1) {
6692 BNX2X_ERR("CFC CAM_INIT failed\n");
6693 return -EBUSY;
6694 }
6695 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 6696
34f80b04
EG
6697 /* read NIG statistic
6698 to see if this is our first up since powerup */
6699 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6700 val = *bnx2x_sp(bp, wb_data[0]);
6701
6702 /* do internal memory self test */
6703 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6704 BNX2X_ERR("internal mem self test failed\n");
6705 return -EBUSY;
6706 }
6707
35b19ba5 6708 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
46c6a674
EG
6709 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6710 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6711 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4d295db0 6712 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
46c6a674
EG
6713 bp->port.need_hw_lock = 1;
6714 break;
6715
34f80b04
EG
6716 default:
6717 break;
6718 }
f1410647 6719
fd4ef40d
EG
6720 bnx2x_setup_fan_failure_detection(bp);
6721
34f80b04
EG
6722 /* clear PXP2 attentions */
6723 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 6724
34f80b04 6725 enable_blocks_attention(bp);
72fd0718
VZ
6726 if (CHIP_PARITY_SUPPORTED(bp))
6727 enable_blocks_parity(bp);
a2fbb9ea 6728
6bbca910
YR
6729 if (!BP_NOMCP(bp)) {
6730 bnx2x_acquire_phy_lock(bp);
6731 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6732 bnx2x_release_phy_lock(bp);
6733 } else
6734 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6735
34f80b04
EG
6736 return 0;
6737}
a2fbb9ea 6738
34f80b04
EG
6739static int bnx2x_init_port(struct bnx2x *bp)
6740{
6741 int port = BP_PORT(bp);
94a78b79 6742 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
1c06328c 6743 u32 low, high;
34f80b04 6744 u32 val;
a2fbb9ea 6745
cdaa7cb8 6746 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
34f80b04
EG
6747
6748 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea 6749
94a78b79 6750 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
94a78b79 6751 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
ca00392c
EG
6752
6753 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6754 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6755 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
94a78b79 6756 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
a2fbb9ea 6757
37b091ba
MC
6758#ifdef BCM_CNIC
6759 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
a2fbb9ea 6760
94a78b79 6761 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
37b091ba
MC
6762 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6763 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
a2fbb9ea 6764#endif
cdaa7cb8 6765
94a78b79 6766 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
1c06328c 6767
94a78b79 6768 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
1c06328c
EG
6769 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6770 /* no pause for emulation and FPGA */
6771 low = 0;
6772 high = 513;
6773 } else {
6774 if (IS_E1HMF(bp))
6775 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6776 else if (bp->dev->mtu > 4096) {
6777 if (bp->flags & ONE_PORT_FLAG)
6778 low = 160;
6779 else {
6780 val = bp->dev->mtu;
6781 /* (24*1024 + val*4)/256 */
6782 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6783 }
6784 } else
6785 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6786 high = low + 56; /* 14*1024/256 */
6787 }
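 /* e.g. SF mode, MTU 9000, both ports in use:
  * low = 96 + 9000/64 + 1 = 237 (i.e. (24*1024 + 9000*4)/256 rounded
  * up) and high = 237 + 56 = 293, in units of 256-byte BRB blocks */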
6788 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6789 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6790
6791
94a78b79 6792 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
ca00392c 6793
94a78b79 6794 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
94a78b79 6795 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
94a78b79 6796 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
94a78b79 6797 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
356e2385 6798
94a78b79
VZ
6799 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6800 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6801 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6802 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
356e2385 6803
94a78b79 6804 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
94a78b79 6805 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
34f80b04 6806
94a78b79 6807 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
a2fbb9ea
ET
6808
6809 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 6810 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea
ET
6811
6812 /* update threshold */
34f80b04 6813 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 6814 /* update init credit */
34f80b04 6815 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
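 /* both values are in 16-byte units; 9040 bytes presumably covers a
  * 9000-byte MTU frame plus header overhead (see the "mtu 9000"
  * comment above) */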
a2fbb9ea
ET
6816
6817 /* probe changes */
34f80b04 6818 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 6819 msleep(5);
34f80b04 6820 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
a2fbb9ea 6821
37b091ba
MC
6822#ifdef BCM_CNIC
6823 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
a2fbb9ea 6824#endif
94a78b79 6825 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
94a78b79 6826 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
34f80b04
EG
6827
6828 if (CHIP_IS_E1(bp)) {
6829 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6830 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6831 }
94a78b79 6832 bnx2x_init_block(bp, HC_BLOCK, init_stage);
34f80b04 6833
94a78b79 6834 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
34f80b04
EG
6835 /* init aeu_mask_attn_func_0/1:
6836 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6837 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6838 * bits 4-7 are used for "per vn group attention" */
6839 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6840 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6841
94a78b79 6842 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
94a78b79 6843 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
94a78b79 6844 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
94a78b79 6845 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
94a78b79 6846 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
356e2385 6847
94a78b79 6848 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
34f80b04
EG
6849
6850 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6851
6852 if (CHIP_IS_E1H(bp)) {
34f80b04
EG
6853 /* 0x2 disable e1hov, 0x1 enable */
6854 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6855 (IS_E1HMF(bp) ? 0x1 : 0x2));
6856
1c06328c
EG
6857 {
6858 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6859 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6860 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6861 }
34f80b04
EG
6862 }
6863
94a78b79 6864 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
94a78b79 6865 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
a2fbb9ea 6866
35b19ba5 6867 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
589abe3a
EG
6868 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6869 {
6870 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6871
6872 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6873 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6874
6875 /* The GPIO should be swapped if the swap register is
6876 set and active */
6877 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6878 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6879
6880 /* Select function upon port-swap configuration */
6881 if (port == 0) {
6882 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6883 aeu_gpio_mask = (swap_val && swap_override) ?
6884 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6885 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6886 } else {
6887 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6888 aeu_gpio_mask = (swap_val && swap_override) ?
6889 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6890 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6891 }
6892 val = REG_RD(bp, offset);
6893 /* add GPIO3 to group */
6894 val |= aeu_gpio_mask;
6895 REG_WR(bp, offset, val);
6896 }
6897 break;
6898
35b19ba5 6899 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
4d295db0 6900 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
f1410647 6901 /* add SPIO 5 to group 0 */
4d295db0
EG
6902 {
6903 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6904 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6905 val = REG_RD(bp, reg_addr);
f1410647 6906 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4d295db0
EG
6907 REG_WR(bp, reg_addr, val);
6908 }
f1410647
ET
6909 break;
6910
6911 default:
6912 break;
6913 }
6914
c18487ee 6915 bnx2x__link_reset(bp);
a2fbb9ea 6916
34f80b04
EG
6917 return 0;
6918}
6919
6920#define ILT_PER_FUNC (768/2)
6921#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6922/* the phys address is shifted right 12 bits and a 1=valid bit is
6923 added at the 53rd bit;
6924 then, since this is a wide register(TM),
6925 we split it into two 32 bit writes
6926 */
6927#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6928#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6929#define PXP_ONE_ILT(x) (((x) << 10) | x)
6930#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
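/* e.g. for addr = 0x0000001234567000:
 * ONCHIP_ADDR1(addr) = 0x01234567 (address bits 12-43) and
 * ONCHIP_ADDR2(addr) = 0x00100000 (address bits 44-63, plus the valid
 * bit at bit 20 of the high dword, i.e. the 53rd bit of the entry);
 * PXP_ILT_RANGE() likewise packs the last line into bits 10-19 and
 * the first line into bits 0-9 of a single register */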
6931
37b091ba
MC
6932#ifdef BCM_CNIC
6933#define CNIC_ILT_LINES 127
6934#define CNIC_CTX_PER_ILT 16
6935#else
34f80b04 6936#define CNIC_ILT_LINES 0
37b091ba 6937#endif
34f80b04
EG
6938
6939static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6940{
6941 int reg;
6942
6943 if (CHIP_IS_E1H(bp))
6944 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6945 else /* E1 */
6946 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6947
6948 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6949}
6950
6951static int bnx2x_init_func(struct bnx2x *bp)
6952{
6953 int port = BP_PORT(bp);
6954 int func = BP_FUNC(bp);
8badd27a 6955 u32 addr, val;
34f80b04
EG
6956 int i;
6957
cdaa7cb8 6958 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
34f80b04 6959
8badd27a
EG
6960 /* set MSI reconfigure capability */
6961 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6962 val = REG_RD(bp, addr);
6963 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6964 REG_WR(bp, addr, val);
6965
34f80b04
EG
6966 i = FUNC_ILT_BASE(func);
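 /* each function owns ILT_PER_FUNC (768/2 = 384) consecutive ILT
  * lines; the CDU context lines are mapped first, followed (when
  * BCM_CNIC is defined) by the timers, QM and searcher lines below */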
6967
6968 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6969 if (CHIP_IS_E1H(bp)) {
6970 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6971 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6972 } else /* E1 */
6973 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6974 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6975
37b091ba
MC
6976#ifdef BCM_CNIC
6977 i += 1 + CNIC_ILT_LINES;
6978 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
6979 if (CHIP_IS_E1(bp))
6980 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6981 else {
6982 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
6983 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
6984 }
6985
6986 i++;
6987 bnx2x_ilt_wr(bp, i, bp->qm_mapping);
6988 if (CHIP_IS_E1(bp))
6989 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6990 else {
6991 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
6992 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
6993 }
6994
6995 i++;
6996 bnx2x_ilt_wr(bp, i, bp->t1_mapping);
6997 if (CHIP_IS_E1(bp))
6998 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6999 else {
7000 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
7001 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
7002 }
7003
7004 /* tell the searcher where the T2 table is */
7005 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
7006
7007 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
7008 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
7009
7010 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
7011 U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
7012 U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
7013
7014 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
7015#endif
34f80b04
EG
7016
7017 if (CHIP_IS_E1H(bp)) {
573f2035
EG
7018 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
7019 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
7020 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
7021 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
7022 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
7023 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
7024 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
7025 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
7026 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
34f80b04
EG
7027
7028 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7029 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
7030 }
7031
7032 /* HC init per function */
7033 if (CHIP_IS_E1H(bp)) {
7034 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7035
7036 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7037 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7038 }
94a78b79 7039 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
34f80b04 7040
c14423fe 7041 /* Reset PCIE errors for debug */
a2fbb9ea
ET
7042 REG_WR(bp, 0x2114, 0xffffffff);
7043 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 7044
34f80b04
EG
7045 return 0;
7046}
7047
7048static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
7049{
7050 int i, rc = 0;
a2fbb9ea 7051
34f80b04
EG
7052 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
7053 BP_FUNC(bp), load_code);
a2fbb9ea 7054
34f80b04
EG
7055 bp->dmae_ready = 0;
7056 mutex_init(&bp->dmae_mutex);
54016b26
EG
7057 rc = bnx2x_gunzip_init(bp);
7058 if (rc)
7059 return rc;
a2fbb9ea 7060
34f80b04
EG
7061 switch (load_code) {
7062 case FW_MSG_CODE_DRV_LOAD_COMMON:
7063 rc = bnx2x_init_common(bp);
7064 if (rc)
7065 goto init_hw_err;
7066 /* no break */
7067
7068 case FW_MSG_CODE_DRV_LOAD_PORT:
7069 bp->dmae_ready = 1;
7070 rc = bnx2x_init_port(bp);
7071 if (rc)
7072 goto init_hw_err;
7073 /* no break */
7074
7075 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
7076 bp->dmae_ready = 1;
7077 rc = bnx2x_init_func(bp);
7078 if (rc)
7079 goto init_hw_err;
7080 break;
7081
7082 default:
7083 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
7084 break;
7085 }
7086
7087 if (!BP_NOMCP(bp)) {
7088 int func = BP_FUNC(bp);
a2fbb9ea
ET
7089
7090 bp->fw_drv_pulse_wr_seq =
34f80b04 7091 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 7092 DRV_PULSE_SEQ_MASK);
6fe49bb9
EG
7093 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
7094 }
a2fbb9ea 7095
34f80b04
EG
7096 /* this needs to be done before gunzip end */
7097 bnx2x_zero_def_sb(bp);
7098 for_each_queue(bp, i)
7099 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
37b091ba
MC
7100#ifdef BCM_CNIC
7101 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
7102#endif
34f80b04
EG
7103
7104init_hw_err:
7105 bnx2x_gunzip_end(bp);
7106
7107 return rc;
a2fbb9ea
ET
7108}
7109
a2fbb9ea
ET
7110static void bnx2x_free_mem(struct bnx2x *bp)
7111{
7112
7113#define BNX2X_PCI_FREE(x, y, size) \
7114 do { \
7115 if (x) { \
1a983142 7116 dma_free_coherent(&bp->pdev->dev, size, x, y); \
a2fbb9ea
ET
7117 x = NULL; \
7118 y = 0; \
7119 } \
7120 } while (0)
7121
7122#define BNX2X_FREE(x) \
7123 do { \
7124 if (x) { \
7125 vfree(x); \
7126 x = NULL; \
7127 } \
7128 } while (0)
7129
7130 int i;
7131
7132 /* fastpath */
555f6c78 7133 /* Common */
a2fbb9ea
ET
7134 for_each_queue(bp, i) {
7135
555f6c78 7136 /* status blocks */
a2fbb9ea
ET
7137 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
7138 bnx2x_fp(bp, i, status_blk_mapping),
ca00392c 7139 sizeof(struct host_status_block));
555f6c78
EG
7140 }
7141 /* Rx */
54b9ddaa 7142 for_each_queue(bp, i) {
a2fbb9ea 7143
555f6c78 7144 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
7145 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
7146 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
7147 bnx2x_fp(bp, i, rx_desc_mapping),
7148 sizeof(struct eth_rx_bd) * NUM_RX_BD);
7149
7150 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
7151 bnx2x_fp(bp, i, rx_comp_mapping),
7152 sizeof(struct eth_fast_path_rx_cqe) *
7153 NUM_RCQ_BD);
a2fbb9ea 7154
7a9b2557 7155 /* SGE ring */
32626230 7156 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7a9b2557
VZ
7157 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
7158 bnx2x_fp(bp, i, rx_sge_mapping),
7159 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
7160 }
555f6c78 7161 /* Tx */
54b9ddaa 7162 for_each_queue(bp, i) {
555f6c78
EG
7163
7164 /* fastpath tx rings: tx_buf tx_desc */
7165 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
7166 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
7167 bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 7168 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 7169 }
a2fbb9ea
ET
7170 /* end of fastpath */
7171
7172 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 7173 sizeof(struct host_def_status_block));
a2fbb9ea
ET
7174
7175 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 7176 sizeof(struct bnx2x_slowpath));
a2fbb9ea 7177
37b091ba 7178#ifdef BCM_CNIC
a2fbb9ea
ET
7179 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
7180 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
7181 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
7182 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
37b091ba
MC
7183 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
7184 sizeof(struct host_status_block));
a2fbb9ea 7185#endif
7a9b2557 7186 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea
ET
7187
7188#undef BNX2X_PCI_FREE
7190#undef BNX2X_FREE
7190}
7191
7192static int bnx2x_alloc_mem(struct bnx2x *bp)
7193{
7194
7195#define BNX2X_PCI_ALLOC(x, y, size) \
7196 do { \
1a983142 7197 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
a2fbb9ea
ET
7198 if (x == NULL) \
7199 goto alloc_mem_err; \
7200 memset(x, 0, size); \
7201 } while (0)
7202
7203#define BNX2X_ALLOC(x, size) \
7204 do { \
7205 x = vmalloc(size); \
7206 if (x == NULL) \
7207 goto alloc_mem_err; \
7208 memset(x, 0, size); \
7209 } while (0)
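 /* BNX2X_PCI_ALLOC is for rings the chip DMAs to/from (coherent DMA
  * memory); BNX2X_ALLOC is for host-only bookkeeping arrays such as
  * the sw_rx_bd/sw_tx_bd rings */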
7210
7211 int i;
7212
7213 /* fastpath */
555f6c78 7214 /* Common */
a2fbb9ea
ET
7215 for_each_queue(bp, i) {
7216 bnx2x_fp(bp, i, bp) = bp;
7217
555f6c78 7218 /* status blocks */
a2fbb9ea
ET
7219 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
7220 &bnx2x_fp(bp, i, status_blk_mapping),
ca00392c 7221 sizeof(struct host_status_block));
555f6c78
EG
7222 }
7223 /* Rx */
54b9ddaa 7224 for_each_queue(bp, i) {
a2fbb9ea 7225
555f6c78 7226 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
7227 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
7228 sizeof(struct sw_rx_bd) * NUM_RX_BD);
7229 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
7230 &bnx2x_fp(bp, i, rx_desc_mapping),
7231 sizeof(struct eth_rx_bd) * NUM_RX_BD);
7232
7233 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
7234 &bnx2x_fp(bp, i, rx_comp_mapping),
7235 sizeof(struct eth_fast_path_rx_cqe) *
7236 NUM_RCQ_BD);
7237
7a9b2557
VZ
7238 /* SGE ring */
7239 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
7240 sizeof(struct sw_rx_page) * NUM_RX_SGE);
7241 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
7242 &bnx2x_fp(bp, i, rx_sge_mapping),
7243 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea 7244 }
555f6c78 7245 /* Tx */
54b9ddaa 7246 for_each_queue(bp, i) {
555f6c78 7247
555f6c78
EG
7248 /* fastpath tx rings: tx_buf tx_desc */
7249 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
7250 sizeof(struct sw_tx_bd) * NUM_TX_BD);
7251 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
7252 &bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 7253 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 7254 }
a2fbb9ea
ET
7255 /* end of fastpath */
7256
7257 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
7258 sizeof(struct host_def_status_block));
7259
7260 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
7261 sizeof(struct bnx2x_slowpath));
7262
37b091ba 7263#ifdef BCM_CNIC
a2fbb9ea
ET
7264 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
7265
a2fbb9ea
ET
7266 /* allocate searcher T2 table
7267 we allocate 1/4 of alloc num for T2
7268 (which is not entered into the ILT) */
7269 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
7270
37b091ba 7271 /* Initialize T2 (for 1024 connections) */
a2fbb9ea 7272 for (i = 0; i < 16*1024; i += 64)
37b091ba 7273 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
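 /* the u64 at offset 56 of each 64-byte T2 entry now holds the bus
  * address of the next entry, chaining the table into a list that
  * bnx2x_init_func() hands to the searcher via
  * SRC_REG_FIRSTFREE0/LASTFREE0 */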
a2fbb9ea 7274
37b091ba 7275 /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
a2fbb9ea
ET
7276 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
7277
7278 /* QM queues (128*MAX_CONN) */
7279 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
37b091ba
MC
7280
7281 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
7282 sizeof(struct host_status_block));
a2fbb9ea
ET
7283#endif
7284
7285 /* Slow path ring */
7286 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
7287
7288 return 0;
7289
7290alloc_mem_err:
7291 bnx2x_free_mem(bp);
7292 return -ENOMEM;
7293
7294#undef BNX2X_PCI_ALLOC
7295#undef BNX2X_ALLOC
7296}
7297
7298static void bnx2x_free_tx_skbs(struct bnx2x *bp)
7299{
7300 int i;
7301
54b9ddaa 7302 for_each_queue(bp, i) {
a2fbb9ea
ET
7303 struct bnx2x_fastpath *fp = &bp->fp[i];
7304
7305 u16 bd_cons = fp->tx_bd_cons;
7306 u16 sw_prod = fp->tx_pkt_prod;
7307 u16 sw_cons = fp->tx_pkt_cons;
7308
a2fbb9ea
ET
7309 while (sw_cons != sw_prod) {
7310 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
7311 sw_cons++;
7312 }
7313 }
7314}
7315
7316static void bnx2x_free_rx_skbs(struct bnx2x *bp)
7317{
7318 int i, j;
7319
54b9ddaa 7320 for_each_queue(bp, j) {
a2fbb9ea
ET
7321 struct bnx2x_fastpath *fp = &bp->fp[j];
7322
a2fbb9ea
ET
7323 for (i = 0; i < NUM_RX_BD; i++) {
7324 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
7325 struct sk_buff *skb = rx_buf->skb;
7326
7327 if (skb == NULL)
7328 continue;
7329
1a983142
FT
7330 dma_unmap_single(&bp->pdev->dev,
7331 dma_unmap_addr(rx_buf, mapping),
7332 bp->rx_buf_size, DMA_FROM_DEVICE);
a2fbb9ea
ET
7333
7334 rx_buf->skb = NULL;
7335 dev_kfree_skb(skb);
7336 }
7a9b2557 7337 if (!fp->disable_tpa)
32626230
EG
7338 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
7339 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 7340 ETH_MAX_AGGREGATION_QUEUES_E1H);
a2fbb9ea
ET
7341 }
7342}
7343
7344static void bnx2x_free_skbs(struct bnx2x *bp)
7345{
7346 bnx2x_free_tx_skbs(bp);
7347 bnx2x_free_rx_skbs(bp);
7348}
7349
7350static void bnx2x_free_msix_irqs(struct bnx2x *bp)
7351{
34f80b04 7352 int i, offset = 1;
a2fbb9ea
ET
7353
7354 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 7355 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
a2fbb9ea
ET
7356 bp->msix_table[0].vector);
7357
37b091ba
MC
7358#ifdef BCM_CNIC
7359 offset++;
7360#endif
a2fbb9ea 7361 for_each_queue(bp, i) {
c14423fe 7362 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 7363 "state %x\n", i, bp->msix_table[i + offset].vector,
a2fbb9ea
ET
7364 bnx2x_fp(bp, i, state));
7365
34f80b04 7366 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 7367 }
a2fbb9ea
ET
7368}
7369
6cbe5065 7370static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
a2fbb9ea 7371{
a2fbb9ea 7372 if (bp->flags & USING_MSIX_FLAG) {
6cbe5065
VZ
7373 if (!disable_only)
7374 bnx2x_free_msix_irqs(bp);
a2fbb9ea 7375 pci_disable_msix(bp->pdev);
a2fbb9ea
ET
7376 bp->flags &= ~USING_MSIX_FLAG;
7377
8badd27a 7378 } else if (bp->flags & USING_MSI_FLAG) {
6cbe5065
VZ
7379 if (!disable_only)
7380 free_irq(bp->pdev->irq, bp->dev);
8badd27a
EG
7381 pci_disable_msi(bp->pdev);
7382 bp->flags &= ~USING_MSI_FLAG;
7383
6cbe5065 7384 } else if (!disable_only)
a2fbb9ea
ET
7385 free_irq(bp->pdev->irq, bp->dev);
7386}
7387
7388static int bnx2x_enable_msix(struct bnx2x *bp)
7389{
8badd27a
EG
7390 int i, rc, offset = 1;
7391 int igu_vec = 0;
a2fbb9ea 7392
8badd27a
EG
7393 bp->msix_table[0].entry = igu_vec;
7394 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
a2fbb9ea 7395
37b091ba
MC
7396#ifdef BCM_CNIC
7397 igu_vec = BP_L_ID(bp) + offset;
7398 bp->msix_table[1].entry = igu_vec;
7399 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
7400 offset++;
7401#endif
34f80b04 7402 for_each_queue(bp, i) {
8badd27a 7403 igu_vec = BP_L_ID(bp) + offset + i;
34f80b04
EG
7404 bp->msix_table[i + offset].entry = igu_vec;
7405 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
7406 "(fastpath #%u)\n", i + offset, igu_vec, i);
a2fbb9ea
ET
7407 }
7408
34f80b04 7409 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
555f6c78 7410 BNX2X_NUM_QUEUES(bp) + offset);
34f80b04 7411 if (rc) {
8badd27a
EG
7412 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
7413 return rc;
34f80b04 7414 }
8badd27a 7415
a2fbb9ea
ET
7416 bp->flags |= USING_MSIX_FLAG;
7417
7418 return 0;
a2fbb9ea
ET
7419}
7420
a2fbb9ea
ET
7421static int bnx2x_req_msix_irqs(struct bnx2x *bp)
7422{
34f80b04 7423 int i, rc, offset = 1;
a2fbb9ea 7424
a2fbb9ea
ET
7425 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
7426 bp->dev->name, bp->dev);
a2fbb9ea
ET
7427 if (rc) {
7428 BNX2X_ERR("request sp irq failed\n");
7429 return -EBUSY;
7430 }
7431
37b091ba
MC
7432#ifdef BCM_CNIC
7433 offset++;
7434#endif
a2fbb9ea 7435 for_each_queue(bp, i) {
555f6c78 7436 struct bnx2x_fastpath *fp = &bp->fp[i];
54b9ddaa
VZ
7437 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
7438 bp->dev->name, i);
ca00392c 7439
34f80b04 7440 rc = request_irq(bp->msix_table[i + offset].vector,
555f6c78 7441 bnx2x_msix_fp_int, 0, fp->name, fp);
a2fbb9ea 7442 if (rc) {
555f6c78 7443 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
a2fbb9ea
ET
7444 bnx2x_free_msix_irqs(bp);
7445 return -EBUSY;
7446 }
7447
555f6c78 7448 fp->state = BNX2X_FP_STATE_IRQ;
a2fbb9ea
ET
7449 }
7450
555f6c78 7451 i = BNX2X_NUM_QUEUES(bp);
cdaa7cb8
VZ
7452 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
7453 " ... fp[%d] %d\n",
7454 bp->msix_table[0].vector,
7455 0, bp->msix_table[offset].vector,
7456 i - 1, bp->msix_table[offset + i - 1].vector);
555f6c78 7457
a2fbb9ea 7458 return 0;
a2fbb9ea
ET
7459}
7460
8badd27a
EG
7461static int bnx2x_enable_msi(struct bnx2x *bp)
7462{
7463 int rc;
7464
7465 rc = pci_enable_msi(bp->pdev);
7466 if (rc) {
7467 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7468 return -1;
7469 }
7470 bp->flags |= USING_MSI_FLAG;
7471
7472 return 0;
7473}
7474
a2fbb9ea
ET
7475static int bnx2x_req_irq(struct bnx2x *bp)
7476{
8badd27a 7477 unsigned long flags;
34f80b04 7478 int rc;
a2fbb9ea 7479
8badd27a
EG
7480 if (bp->flags & USING_MSI_FLAG)
7481 flags = 0;
7482 else
7483 flags = IRQF_SHARED;
7484
7485 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
34f80b04 7486 bp->dev->name, bp->dev);
a2fbb9ea
ET
7487 if (!rc)
7488 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7489
7490 return rc;
a2fbb9ea
ET
7491}
7492
65abd74d
YG
7493static void bnx2x_napi_enable(struct bnx2x *bp)
7494{
7495 int i;
7496
54b9ddaa 7497 for_each_queue(bp, i)
65abd74d
YG
7498 napi_enable(&bnx2x_fp(bp, i, napi));
7499}
7500
7501static void bnx2x_napi_disable(struct bnx2x *bp)
7502{
7503 int i;
7504
54b9ddaa 7505 for_each_queue(bp, i)
65abd74d
YG
7506 napi_disable(&bnx2x_fp(bp, i, napi));
7507}
7508
7509static void bnx2x_netif_start(struct bnx2x *bp)
7510{
e1510706
EG
7511 int intr_sem;
7512
7513 intr_sem = atomic_dec_and_test(&bp->intr_sem);
7514 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7515
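 /* atomic_dec_and_test() returns true only when the counter reaches
  * zero, i.e. once the increments done on the interrupt-disable
  * (stop) path have all been balanced */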
7516 if (intr_sem) {
65abd74d 7517 if (netif_running(bp->dev)) {
65abd74d
YG
7518 bnx2x_napi_enable(bp);
7519 bnx2x_int_enable(bp);
555f6c78
EG
7520 if (bp->state == BNX2X_STATE_OPEN)
7521 netif_tx_wake_all_queues(bp->dev);
65abd74d
YG
7522 }
7523 }
7524}
7525
f8ef6e44 7526static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 7527{
f8ef6e44 7528 bnx2x_int_disable_sync(bp, disable_hw);
e94d8af3 7529 bnx2x_napi_disable(bp);
762d5f6c 7530 netif_tx_disable(bp->dev);
65abd74d
YG
7531}
7532
a2fbb9ea
ET
7533/*
7534 * Init service functions
7535 */
7536
e665bfda
MC
7537/**
7538 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7539 *
7540 * @param bp driver descriptor
7541 * @param set set or clear an entry (1 or 0)
7542 * @param mac pointer to a buffer containing a MAC
7543 * @param cl_bit_vec bit vector of clients to register a MAC for
7544 * @param cam_offset offset in a CAM to use
7545 * @param with_bcast set broadcast MAC as well
7546 */
7547static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7548 u32 cl_bit_vec, u8 cam_offset,
7549 u8 with_bcast)
a2fbb9ea
ET
7550{
7551 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 7552 int port = BP_PORT(bp);
a2fbb9ea
ET
7553
7554 /* CAM allocation
7555 * unicasts 0-31:port0 32-63:port1
7556 * multicast 64-127:port0 128-191:port1
7557 */
e665bfda
MC
7558 config->hdr.length = 1 + (with_bcast ? 1 : 0);
7559 config->hdr.offset = cam_offset;
7560 config->hdr.client_id = 0xff;
a2fbb9ea
ET
7561 config->hdr.reserved1 = 0;
7562
7563 /* primary MAC */
7564 config->config_table[0].cam_entry.msb_mac_addr =
e665bfda 7565 swab16(*(u16 *)&mac[0]);
a2fbb9ea 7566 config->config_table[0].cam_entry.middle_mac_addr =
e665bfda 7567 swab16(*(u16 *)&mac[2]);
a2fbb9ea 7568 config->config_table[0].cam_entry.lsb_mac_addr =
e665bfda 7569 swab16(*(u16 *)&mac[4]);
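 /* e.g. on a little-endian host, MAC 00:11:22:33:44:55 gives
  * msb/middle/lsb = 0x0011/0x2233/0x4455: swab16() undoes the
  * little-endian u16 load so each field keeps the bytes in network
  * order */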
34f80b04 7570 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
3101c2bc
YG
7571 if (set)
7572 config->config_table[0].target_table_entry.flags = 0;
7573 else
7574 CAM_INVALIDATE(config->config_table[0]);
ca00392c 7575 config->config_table[0].target_table_entry.clients_bit_vector =
e665bfda 7576 cpu_to_le32(cl_bit_vec);
a2fbb9ea
ET
7577 config->config_table[0].target_table_entry.vlan_id = 0;
7578
3101c2bc
YG
7579 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7580 (set ? "setting" : "clearing"),
a2fbb9ea
ET
7581 config->config_table[0].cam_entry.msb_mac_addr,
7582 config->config_table[0].cam_entry.middle_mac_addr,
7583 config->config_table[0].cam_entry.lsb_mac_addr);
7584
7585 /* broadcast */
e665bfda
MC
7586 if (with_bcast) {
7587 config->config_table[1].cam_entry.msb_mac_addr =
7588 cpu_to_le16(0xffff);
7589 config->config_table[1].cam_entry.middle_mac_addr =
7590 cpu_to_le16(0xffff);
7591 config->config_table[1].cam_entry.lsb_mac_addr =
7592 cpu_to_le16(0xffff);
7593 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7594 if (set)
7595 config->config_table[1].target_table_entry.flags =
7596 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7597 else
7598 CAM_INVALIDATE(config->config_table[1]);
7599 config->config_table[1].target_table_entry.clients_bit_vector =
7600 cpu_to_le32(cl_bit_vec);
7601 config->config_table[1].target_table_entry.vlan_id = 0;
7602 }
a2fbb9ea
ET
7603
7604 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7605 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7606 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7607}
7608
e665bfda
MC
7609/**
7610 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7611 *
7612 * @param bp driver descriptor
7613 * @param set set or clear an entry (1 or 0)
7614 * @param mac pointer to a buffer containing a MAC
7615 * @param cl_bit_vec bit vector of clients to register a MAC for
7616 * @param cam_offset offset in a CAM to use
7617 */
7618static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7619 u32 cl_bit_vec, u8 cam_offset)
34f80b04
EG
7620{
7621 struct mac_configuration_cmd_e1h *config =
7622 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7623
8d9c5f34 7624 config->hdr.length = 1;
e665bfda
MC
7625 config->hdr.offset = cam_offset;
7626 config->hdr.client_id = 0xff;
34f80b04
EG
7627 config->hdr.reserved1 = 0;
7628
7629 /* primary MAC */
7630 config->config_table[0].msb_mac_addr =
e665bfda 7631 swab16(*(u16 *)&mac[0]);
34f80b04 7632 config->config_table[0].middle_mac_addr =
e665bfda 7633 swab16(*(u16 *)&mac[2]);
34f80b04 7634 config->config_table[0].lsb_mac_addr =
e665bfda 7635 swab16(*(u16 *)&mac[4]);
ca00392c 7636 config->config_table[0].clients_bit_vector =
e665bfda 7637 cpu_to_le32(cl_bit_vec);
34f80b04
EG
7638 config->config_table[0].vlan_id = 0;
7639 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
3101c2bc
YG
7640 if (set)
7641 config->config_table[0].flags = BP_PORT(bp);
7642 else
7643 config->config_table[0].flags =
7644 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 7645
e665bfda 7646 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
3101c2bc 7647 (set ? "setting" : "clearing"),
34f80b04
EG
7648 config->config_table[0].msb_mac_addr,
7649 config->config_table[0].middle_mac_addr,
e665bfda 7650 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
34f80b04
EG
7651
7652 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7653 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7654 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7655}
7656
a2fbb9ea
ET
7657static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7658 int *state_p, int poll)
7659{
7660 /* can take a while if any port is running */
8b3a0f0b 7661 int cnt = 5000;
a2fbb9ea 7662
c14423fe
ET
7663 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7664 poll ? "polling" : "waiting", state, idx);
a2fbb9ea
ET
7665
7666 might_sleep();
34f80b04 7667 while (cnt--) {
a2fbb9ea
ET
7668 if (poll) {
7669 bnx2x_rx_int(bp->fp, 10);
34f80b04
EG
7670 /* if index is different from 0
7671 * the reply for some commands will
3101c2bc 7672 * be on the non default queue
a2fbb9ea
ET
7673 */
7674 if (idx)
7675 bnx2x_rx_int(&bp->fp[idx], 10);
7676 }
a2fbb9ea 7677
3101c2bc 7678 mb(); /* state is changed by bnx2x_sp_event() */
8b3a0f0b
EG
7679 if (*state_p == state) {
7680#ifdef BNX2X_STOP_ON_ERROR
7681 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
7682#endif
a2fbb9ea 7683 return 0;
8b3a0f0b 7684 }
a2fbb9ea 7685
a2fbb9ea 7686 msleep(1);
e3553b29
EG
7687
7688 if (bp->panic)
7689 return -EIO;
a2fbb9ea
ET
7690 }
7691
a2fbb9ea 7692 /* timeout! */
49d66772
ET
7693 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7694 poll ? "polling" : "waiting", state, idx);
34f80b04
EG
7695#ifdef BNX2X_STOP_ON_ERROR
7696 bnx2x_panic();
7697#endif
a2fbb9ea 7698
49d66772 7699 return -EBUSY;
a2fbb9ea
ET
7700}
7701
e665bfda
MC
7702static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7703{
7704 bp->set_mac_pending++;
7705 smp_wmb();
7706
7707 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7708 (1 << bp->fp->cl_id), BP_FUNC(bp));
7709
7710 /* Wait for a completion */
7711 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7712}
7713
7714static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7715{
7716 bp->set_mac_pending++;
7717 smp_wmb();
7718
7719 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7720 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7721 1);
7722
7723 /* Wait for a completion */
7724 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7725}
7726
993ac7b5
MC
7727#ifdef BCM_CNIC
7728/**
7729 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
7730 * MAC(s). This function will wait until the ramrod completion
7731 * returns.
7732 *
7733 * @param bp driver handle
7734 * @param set set or clear the CAM entry
7735 *
7736 * @return 0 if success, -ENODEV if the ramrod doesn't return.
7737 */
7738static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7739{
7740 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7741
7742 bp->set_mac_pending++;
7743 smp_wmb();
7744
7745 /* Send a SET_MAC ramrod */
7746 if (CHIP_IS_E1(bp))
7747 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7748 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7749 1);
7750 else
7751 /* CAM allocation for E1H
7752 * unicasts: by func number
7753 * multicast: 20+FUNC*20, 20 each
7754 */
7755 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7756 cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7757
7758 /* Wait for a completion when setting */
7759 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7760
7761 return 0;
7762}
7763#endif
7764
a2fbb9ea
ET
7765static int bnx2x_setup_leading(struct bnx2x *bp)
7766{
34f80b04 7767 int rc;
a2fbb9ea 7768
c14423fe 7769 /* reset IGU state */
34f80b04 7770 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
7771
7772 /* SETUP ramrod */
7773 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7774
34f80b04
EG
7775 /* Wait for completion */
7776 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 7777
34f80b04 7778 return rc;
a2fbb9ea
ET
7779}
7780
7781static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7782{
555f6c78
EG
7783 struct bnx2x_fastpath *fp = &bp->fp[index];
7784
a2fbb9ea 7785 /* reset IGU state */
555f6c78 7786 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 7787
228241eb 7788 /* SETUP ramrod */
555f6c78
EG
7789 fp->state = BNX2X_FP_STATE_OPENING;
7790 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7791 fp->cl_id, 0);
a2fbb9ea
ET
7792
7793 /* Wait for completion */
7794 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
555f6c78 7795 &(fp->state), 0);
a2fbb9ea
ET
7796}
7797
a2fbb9ea 7798static int bnx2x_poll(struct napi_struct *napi, int budget);
a2fbb9ea 7799
54b9ddaa 7800static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
ca00392c 7801{
ca00392c
EG
7802
7803 switch (bp->multi_mode) {
7804 case ETH_RSS_MODE_DISABLED:
54b9ddaa 7805 bp->num_queues = 1;
ca00392c
EG
7806 break;
7807
7808 case ETH_RSS_MODE_REGULAR:
54b9ddaa
VZ
7809 if (num_queues)
7810 bp->num_queues = min_t(u32, num_queues,
7811 BNX2X_MAX_QUEUES(bp));
ca00392c 7812 else
54b9ddaa
VZ
7813 bp->num_queues = min_t(u32, num_online_cpus(),
7814 BNX2X_MAX_QUEUES(bp));
ca00392c
EG
7815 break;
7816
7817
7818 default:
54b9ddaa 7819 bp->num_queues = 1;
ca00392c
EG
7820 break;
7821 }
ca00392c
EG
7822}
7823
54b9ddaa 7824static int bnx2x_set_num_queues(struct bnx2x *bp)
a2fbb9ea 7825{
ca00392c 7826 int rc = 0;
a2fbb9ea 7827
8badd27a
EG
7828 switch (int_mode) {
7829 case INT_MODE_INTx:
7830 case INT_MODE_MSI:
54b9ddaa 7831 bp->num_queues = 1;
ca00392c 7832 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
8badd27a
EG
7833 break;
7834
7835 case INT_MODE_MSIX:
7836 default:
54b9ddaa
VZ
7837 /* Set number of queues according to bp->multi_mode value */
7838 bnx2x_set_num_queues_msix(bp);
ca00392c 7839
54b9ddaa
VZ
7840 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7841 bp->num_queues);
ca00392c 7842
2dfe0e1f
EG
7843 /* if we can't use MSI-X we only need one fp,
7844 * so try to enable MSI-X with the requested number of fp's
7845 * and fall back to MSI or legacy INTx with one fp
7846 */
ca00392c 7847 rc = bnx2x_enable_msix(bp);
54b9ddaa 7848 if (rc)
34f80b04 7849 /* failed to enable MSI-X */
54b9ddaa 7850 bp->num_queues = 1;
8badd27a 7851 break;
a2fbb9ea 7852 }
54b9ddaa 7853 bp->dev->real_num_tx_queues = bp->num_queues;
ca00392c 7854 return rc;
8badd27a
EG
7855}
7856
993ac7b5
MC
7857#ifdef BCM_CNIC
7858static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7859static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7860#endif
8badd27a
EG
7861
7862/* must be called with rtnl_lock */
7863static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7864{
7865 u32 load_code;
ca00392c
EG
7866 int i, rc;
7867
8badd27a 7868#ifdef BNX2X_STOP_ON_ERROR
8badd27a
EG
7869 if (unlikely(bp->panic))
7870 return -EPERM;
7871#endif
7872
7873 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7874
54b9ddaa 7875 rc = bnx2x_set_num_queues(bp);
c14423fe 7876
6cbe5065
VZ
7877 if (bnx2x_alloc_mem(bp)) {
7878 bnx2x_free_irq(bp, true);
a2fbb9ea 7879 return -ENOMEM;
6cbe5065 7880 }
a2fbb9ea 7881
54b9ddaa 7882 for_each_queue(bp, i)
7a9b2557
VZ
7883 bnx2x_fp(bp, i, disable_tpa) =
7884 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7885
54b9ddaa 7886 for_each_queue(bp, i)
2dfe0e1f
EG
7887 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7888 bnx2x_poll, 128);
7889
2dfe0e1f
EG
7890 bnx2x_napi_enable(bp);
7891
34f80b04
EG
7892 if (bp->flags & USING_MSIX_FLAG) {
7893 rc = bnx2x_req_msix_irqs(bp);
7894 if (rc) {
6cbe5065 7895 bnx2x_free_irq(bp, true);
2dfe0e1f 7896 goto load_error1;
34f80b04
EG
7897 }
7898 } else {
ca00392c 7899 /* Fall back to INTx if we failed to enable MSI-X due to lack of
54b9ddaa 7900 memory (in bnx2x_set_num_queues()) */
8badd27a
EG
7901 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7902 bnx2x_enable_msi(bp);
34f80b04
EG
7903 bnx2x_ack_int(bp);
7904 rc = bnx2x_req_irq(bp);
7905 if (rc) {
2dfe0e1f 7906 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
6cbe5065 7907 bnx2x_free_irq(bp, true);
2dfe0e1f 7908 goto load_error1;
a2fbb9ea 7909 }
8badd27a
EG
7910 if (bp->flags & USING_MSI_FLAG) {
7911 bp->dev->irq = bp->pdev->irq;
7995c64e
JP
7912 netdev_info(bp->dev, "using MSI IRQ %d\n",
7913 bp->pdev->irq);
8badd27a 7914 }
a2fbb9ea
ET
7915 }
7916
2dfe0e1f
EG
7917 /* Send LOAD_REQUEST command to MCP
7918 Returns the type of LOAD command:
7919 if it is the first port to be initialized,
7920 common blocks should be initialized; otherwise they should not be
7921 */
7922 if (!BP_NOMCP(bp)) {
7923 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7924 if (!load_code) {
7925 BNX2X_ERR("MCP response failure, aborting\n");
7926 rc = -EBUSY;
7927 goto load_error2;
7928 }
7929 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7930 rc = -EBUSY; /* other port in diagnostic mode */
7931 goto load_error2;
7932 }
7933
7934 } else {
7935 int port = BP_PORT(bp);
7936
f5372251 7937 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
2dfe0e1f
EG
7938 load_count[0], load_count[1], load_count[2]);
7939 load_count[0]++;
7940 load_count[1 + port]++;
f5372251 7941 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
2dfe0e1f
EG
7942 load_count[0], load_count[1], load_count[2]);
7943 if (load_count[0] == 1)
7944 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7945 else if (load_count[1 + port] == 1)
7946 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7947 else
7948 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7949 }
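 /* whichever function performed the COMMON or PORT init becomes the
  * PMF (port management function) and takes over link handling and
  * per-port housekeeping */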
7950
7951 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7952 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7953 bp->port.pmf = 1;
7954 else
7955 bp->port.pmf = 0;
7956 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
a2fbb9ea 7957
a2fbb9ea 7958 /* Initialize HW */
34f80b04
EG
7959 rc = bnx2x_init_hw(bp, load_code);
7960 if (rc) {
a2fbb9ea 7961 BNX2X_ERR("HW init failed, aborting\n");
f1e1a199
VZ
7962 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7963 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7964 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
2dfe0e1f 7965 goto load_error2;
a2fbb9ea
ET
7966 }
7967
a2fbb9ea 7968 /* Setup NIC internals and enable interrupts */
471de716 7969 bnx2x_nic_init(bp, load_code);
a2fbb9ea 7970
2691d51d
EG
7971 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7972 (bp->common.shmem2_base))
7973 SHMEM2_WR(bp, dcc_support,
7974 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7975 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7976
a2fbb9ea 7977 /* Send LOAD_DONE command to MCP */
34f80b04 7978 if (!BP_NOMCP(bp)) {
228241eb
ET
7979 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7980 if (!load_code) {
da5a662a 7981 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 7982 rc = -EBUSY;
2dfe0e1f 7983 goto load_error3;
a2fbb9ea
ET
7984 }
7985 }
7986
7987 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7988
34f80b04
EG
7989 rc = bnx2x_setup_leading(bp);
7990 if (rc) {
da5a662a 7991 BNX2X_ERR("Setup leading failed!\n");
e3553b29 7992#ifndef BNX2X_STOP_ON_ERROR
2dfe0e1f 7993 goto load_error3;
e3553b29
EG
7994#else
7995 bp->panic = 1;
7996 return -EBUSY;
7997#endif
34f80b04 7998 }
a2fbb9ea 7999
34f80b04
EG
8000 if (CHIP_IS_E1H(bp))
8001 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
f5372251 8002 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
f34d28ea 8003 bp->flags |= MF_FUNC_DIS;
34f80b04 8004 }
a2fbb9ea 8005
ca00392c 8006 if (bp->state == BNX2X_STATE_OPEN) {
37b091ba
MC
8007#ifdef BCM_CNIC
8008 /* Enable Timer scan */
8009 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
8010#endif
34f80b04
EG
8011 for_each_nondefault_queue(bp, i) {
8012 rc = bnx2x_setup_multi(bp, i);
8013 if (rc)
37b091ba
MC
8014#ifdef BCM_CNIC
8015 goto load_error4;
8016#else
2dfe0e1f 8017 goto load_error3;
37b091ba 8018#endif
34f80b04 8019 }
a2fbb9ea 8020
ca00392c 8021 if (CHIP_IS_E1(bp))
e665bfda 8022 bnx2x_set_eth_mac_addr_e1(bp, 1);
ca00392c 8023 else
e665bfda 8024 bnx2x_set_eth_mac_addr_e1h(bp, 1);
993ac7b5
MC
8025#ifdef BCM_CNIC
8026 /* Set iSCSI L2 MAC */
8027 mutex_lock(&bp->cnic_mutex);
8028 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
8029 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
8030 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
4a6e47a4
MC
8031 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
8032 CNIC_SB_ID(bp));
993ac7b5
MC
8033 }
8034 mutex_unlock(&bp->cnic_mutex);
8035#endif
ca00392c 8036 }
34f80b04
EG
8037
8038 if (bp->port.pmf)
b5bf9068 8039 bnx2x_initial_phy_init(bp, load_mode);
a2fbb9ea
ET
8040
8041 /* Start fast path */
34f80b04
EG
8042 switch (load_mode) {
8043 case LOAD_NORMAL:
ca00392c
EG
8044 if (bp->state == BNX2X_STATE_OPEN) {
8045 /* Tx queues should only be re-enabled */
8046 netif_tx_wake_all_queues(bp->dev);
8047 }
2dfe0e1f 8048 /* Initialize the receive filter. */
34f80b04
EG
8049 bnx2x_set_rx_mode(bp->dev);
8050 break;
8051
8052 case LOAD_OPEN:
555f6c78 8053 netif_tx_start_all_queues(bp->dev);
ca00392c
EG
8054 if (bp->state != BNX2X_STATE_OPEN)
8055 netif_tx_disable(bp->dev);
2dfe0e1f 8056 /* Initialize the receive filter. */
34f80b04 8057 bnx2x_set_rx_mode(bp->dev);
34f80b04 8058 break;
a2fbb9ea 8059
34f80b04 8060 case LOAD_DIAG:
2dfe0e1f 8061 /* Initialize the receive filter. */
a2fbb9ea 8062 bnx2x_set_rx_mode(bp->dev);
34f80b04
EG
8063 bp->state = BNX2X_STATE_DIAG;
8064 break;
8065
8066 default:
8067 break;
a2fbb9ea
ET
8068 }
8069
34f80b04
EG
8070 if (!bp->port.pmf)
8071 bnx2x__link_status_update(bp);
8072
a2fbb9ea
ET
8073 /* start the timer */
8074 mod_timer(&bp->timer, jiffies + bp->current_interval);
8075
993ac7b5
MC
8076#ifdef BCM_CNIC
8077 bnx2x_setup_cnic_irq_info(bp);
8078 if (bp->state == BNX2X_STATE_OPEN)
8079 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
8080#endif
72fd0718 8081 bnx2x_inc_load_cnt(bp);
34f80b04 8082
a2fbb9ea
ET
8083 return 0;
8084
37b091ba
MC
8085#ifdef BCM_CNIC
8086load_error4:
8087 /* Disable Timer scan */
8088 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
8089#endif
2dfe0e1f
EG
8090load_error3:
8091 bnx2x_int_disable_sync(bp, 1);
8092 if (!BP_NOMCP(bp)) {
8093 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
8094 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8095 }
8096 bp->port.pmf = 0;
7a9b2557
VZ
8097 /* Free SKBs, SGEs, TPA pool and driver internals */
8098 bnx2x_free_skbs(bp);
54b9ddaa 8099 for_each_queue(bp, i)
3196a88a 8100 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2dfe0e1f 8101load_error2:
d1014634 8102 /* Release IRQs */
6cbe5065 8103 bnx2x_free_irq(bp, false);
2dfe0e1f
EG
8104load_error1:
8105 bnx2x_napi_disable(bp);
54b9ddaa 8106 for_each_queue(bp, i)
7cde1c8b 8107 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
8108 bnx2x_free_mem(bp);
8109
34f80b04 8110 return rc;
a2fbb9ea
ET
8111}
8112
8113static int bnx2x_stop_multi(struct bnx2x *bp, int index)
8114{
555f6c78 8115 struct bnx2x_fastpath *fp = &bp->fp[index];
a2fbb9ea
ET
8116 int rc;
8117
c14423fe 8118 /* halt the connection */
555f6c78
EG
8119 fp->state = BNX2X_FP_STATE_HALTING;
8120 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
a2fbb9ea 8121
34f80b04 8122 /* Wait for completion */
a2fbb9ea 8123 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
555f6c78 8124 &(fp->state), 1);
c14423fe 8125 if (rc) /* timeout */
a2fbb9ea
ET
8126 return rc;
8127
8128 /* delete cfc entry */
8129 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
8130
34f80b04
EG
8131 /* Wait for completion */
8132 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
555f6c78 8133 &(fp->state), 1);
34f80b04 8134 return rc;
a2fbb9ea
ET
8135}
8136
da5a662a 8137static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 8138{
4781bfad 8139 __le16 dsb_sp_prod_idx;
c14423fe 8140 /* if the other port is handling traffic,
a2fbb9ea 8141 this can take a lot of time */
34f80b04
EG
8142 int cnt = 500;
8143 int rc;
a2fbb9ea
ET
8144
8145 might_sleep();
8146
8147 /* Send HALT ramrod */
8148 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
0626b899 8149 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
a2fbb9ea 8150
34f80b04
EG
8151 /* Wait for completion */
8152 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
8153 &(bp->fp[0].state), 1);
8154 if (rc) /* timeout */
da5a662a 8155 return rc;
a2fbb9ea 8156
49d66772 8157 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 8158
228241eb 8159 /* Send PORT_DELETE ramrod */
a2fbb9ea
ET
8160 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
8161
49d66772 8162 /* Wait for completion to arrive on default status block
a2fbb9ea
ET
8163 we are going to reset the chip anyway
8164 so there is not much to do if this times out
8165 */
34f80b04 8166 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
34f80b04
EG
8167 if (!cnt) {
8168 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
8169 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
8170 *bp->dsb_sp_prod, dsb_sp_prod_idx);
8171#ifdef BNX2X_STOP_ON_ERROR
8172 bnx2x_panic();
8173#endif
36e552ab 8174 rc = -EBUSY;
34f80b04
EG
8175 break;
8176 }
8177 cnt--;
da5a662a 8178 msleep(1);
5650d9d4 8179 rmb(); /* Refresh the dsb_sp_prod */
49d66772
ET
8180 }
8181 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
8182 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
da5a662a
VZ
8183
8184 return rc;
a2fbb9ea
ET
8185}
8186
34f80b04
EG
8187static void bnx2x_reset_func(struct bnx2x *bp)
8188{
8189 int port = BP_PORT(bp);
8190 int func = BP_FUNC(bp);
8191 int base, i;
8192
8193 /* Configure IGU */
8194 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8195 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8196
37b091ba
MC
8197#ifdef BCM_CNIC
8198 /* Disable Timer scan */
8199 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
8200 /*
8201 * Wait for at least 10ms and up to 2 seconds for the timers scan to
8202 * complete
8203 */
8204 for (i = 0; i < 200; i++) {
8205 msleep(10);
8206 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
8207 break;
8208 }
8209#endif
34f80b04
EG
8210 /* Clear ILT */
8211 base = FUNC_ILT_BASE(func);
8212 for (i = base; i < base + ILT_PER_FUNC; i++)
8213 bnx2x_ilt_wr(bp, i, 0);
8214}
8215
8216static void bnx2x_reset_port(struct bnx2x *bp)
8217{
8218 int port = BP_PORT(bp);
8219 u32 val;
8220
8221 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
8222
8223 /* Do not rcv packets to BRB */
8224 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
8225 /* Do not direct rcv packets that are not for MCP to the BRB */
8226 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
8227 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8228
8229 /* Configure AEU */
8230 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
8231
8232 msleep(100);
8233 /* Check for BRB port occupancy */
8234 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
8235 if (val)
8236 DP(NETIF_MSG_IFDOWN,
33471629 8237 "BRB1 is not empty %d blocks are occupied\n", val);
34f80b04
EG
8238
8239 /* TODO: Close Doorbell port? */
8240}
8241
34f80b04
EG
8242static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
8243{
8244 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
8245 BP_FUNC(bp), reset_code);
8246
8247 switch (reset_code) {
8248 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
8249 bnx2x_reset_port(bp);
8250 bnx2x_reset_func(bp);
8251 bnx2x_reset_common(bp);
8252 break;
8253
8254 case FW_MSG_CODE_DRV_UNLOAD_PORT:
8255 bnx2x_reset_port(bp);
8256 bnx2x_reset_func(bp);
8257 break;
8258
8259 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
8260 bnx2x_reset_func(bp);
8261 break;
49d66772 8262
34f80b04
EG
8263 default:
8264 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
8265 break;
8266 }
8267}
8268
72fd0718 8269static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
a2fbb9ea 8270{
da5a662a 8271 int port = BP_PORT(bp);
a2fbb9ea 8272 u32 reset_code = 0;
da5a662a 8273 int i, cnt, rc;
a2fbb9ea 8274
555f6c78 8275 /* Wait until tx fastpath tasks complete */
54b9ddaa 8276 for_each_queue(bp, i) {
228241eb
ET
8277 struct bnx2x_fastpath *fp = &bp->fp[i];
8278
34f80b04 8279 cnt = 1000;
e8b5fc51 8280 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 8281
7961f791 8282 bnx2x_tx_int(fp);
34f80b04
EG
8283 if (!cnt) {
8284 BNX2X_ERR("timeout waiting for queue[%d]\n",
8285 i);
8286#ifdef BNX2X_STOP_ON_ERROR
8287 bnx2x_panic();
8288 return; /* bnx2x_chip_cleanup() is void */
8289#else
8290 break;
8291#endif
8292 }
8293 cnt--;
da5a662a 8294 msleep(1);
34f80b04 8295 }
228241eb 8296 }
da5a662a
VZ
8297 /* Give HW time to discard old tx messages */
8298 msleep(1);
a2fbb9ea 8299
3101c2bc
YG
8300 if (CHIP_IS_E1(bp)) {
8301 struct mac_configuration_cmd *config =
8302 bnx2x_sp(bp, mcast_config);
8303
e665bfda 8304 bnx2x_set_eth_mac_addr_e1(bp, 0);
3101c2bc 8305
8d9c5f34 8306 for (i = 0; i < config->hdr.length; i++)
3101c2bc
YG
8307 CAM_INVALIDATE(config->config_table[i]);
8308
8d9c5f34 8309 config->hdr.length = i;
3101c2bc
YG
8310 if (CHIP_REV_IS_SLOW(bp))
8311 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
8312 else
8313 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
0626b899 8314 config->hdr.client_id = bp->fp->cl_id;
3101c2bc
YG
8315 config->hdr.reserved1 = 0;
8316
e665bfda
MC
8317 bp->set_mac_pending++;
8318 smp_wmb();
8319
3101c2bc
YG
8320 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8321 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
8322 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
8323
8324 } else { /* E1H */
65abd74d
YG
8325 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
8326
e665bfda 8327 bnx2x_set_eth_mac_addr_e1h(bp, 0);
3101c2bc
YG
8328
8329 for (i = 0; i < MC_HASH_SIZE; i++)
8330 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7d0446c2
EG
8331
8332 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
3101c2bc 8333 }
993ac7b5
MC
8334#ifdef BCM_CNIC
8335 /* Clear iSCSI L2 MAC */
8336 mutex_lock(&bp->cnic_mutex);
8337 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
8338 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
8339 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
8340 }
8341 mutex_unlock(&bp->cnic_mutex);
8342#endif
3101c2bc 8343
65abd74d
YG
8344 if (unload_mode == UNLOAD_NORMAL)
8345 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8346
7d0446c2 8347 else if (bp->flags & NO_WOL_FLAG)
65abd74d 8348 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
65abd74d 8349
7d0446c2 8350 else if (bp->wol) {
65abd74d
YG
8351 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
8352 u8 *mac_addr = bp->dev->dev_addr;
8353 u32 val;
8354 /* The mac address is written to entries 1-4 to
8355 preserve entry 0 which is used by the PMF */
8356 u8 entry = (BP_E1HVN(bp) + 1)*8;
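 /* each EMAC MAC_MATCH entry is two 32-bit registers (8 bytes wide),
  * hence the *8 byte offset and the two writes below */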
8357
8358 val = (mac_addr[0] << 8) | mac_addr[1];
8359 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
8360
8361 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
8362 (mac_addr[4] << 8) | mac_addr[5];
8363 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
8364
8365 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
8366
8367 } else
8368 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 8369
34f80b04
EG
8370 /* Close multi and leading connections;
8371 completions for ramrods are collected in a synchronous way */
a2fbb9ea
ET
8372 for_each_nondefault_queue(bp, i)
8373 if (bnx2x_stop_multi(bp, i))
228241eb 8374 goto unload_error;
a2fbb9ea 8375
da5a662a
VZ
8376 rc = bnx2x_stop_leading(bp);
8377 if (rc) {
34f80b04 8378 BNX2X_ERR("Stop leading failed!\n");
da5a662a 8379#ifdef BNX2X_STOP_ON_ERROR
34f80b04 8380 return -EBUSY;
da5a662a
VZ
8381#else
8382 goto unload_error;
34f80b04 8383#endif
228241eb
ET
8384 }
8385
8386unload_error:
34f80b04 8387 if (!BP_NOMCP(bp))
228241eb 8388 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04 8389 else {
f5372251 8390 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
34f80b04
EG
8391 load_count[0], load_count[1], load_count[2]);
8392 load_count[0]--;
da5a662a 8393 load_count[1 + port]--;
f5372251 8394 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
34f80b04
EG
8395 load_count[0], load_count[1], load_count[2]);
8396 if (load_count[0] == 0)
8397 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 8398 else if (load_count[1 + port] == 0)
34f80b04
EG
8399 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
8400 else
8401 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
8402 }
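/* Editor's summary of the NO-MCP bookkeeping above: load_count[0] counts
 * loaded functions on the whole chip and load_count[1 + port] per port,
 * so the last function down requests UNLOAD_COMMON, the last one on its
 * port UNLOAD_PORT, and anything else UNLOAD_FUNCTION - mirroring the
 * answers the MCP would normally give. */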
a2fbb9ea 8403
34f80b04
EG
8404 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
8405 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
8406 bnx2x__link_reset(bp);
a2fbb9ea
ET
8407
8408 /* Reset the chip */
228241eb 8409 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
8410
8411 /* Report UNLOAD_DONE to MCP */
34f80b04 8412 if (!BP_NOMCP(bp))
a2fbb9ea 8413 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
356e2385 8414
72fd0718
VZ
8415}
8416
8417static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp)
8418{
8419 u32 val;
8420
8421 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
8422
8423 if (CHIP_IS_E1(bp)) {
8424 int port = BP_PORT(bp);
8425 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8426 MISC_REG_AEU_MASK_ATTN_FUNC_0;
8427
8428 val = REG_RD(bp, addr);
8429 val &= ~(0x300);
8430 REG_WR(bp, addr, val);
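/* Editor's note (assumption): the masked write above clears AEU mask
 * bits 8-9 (0x300), taken here to be the attention inputs that drive
 * the "close the gates" logic on E1. */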
8431 } else if (CHIP_IS_E1H(bp)) {
8432 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
8433 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
8434 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
8435 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
8436 }
8437}
8438
8439/* must be called with rtnl_lock */
8440static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
8441{
8442 int i;
8443
8444 if (bp->state == BNX2X_STATE_CLOSED) {
8445 /* Interface has been removed - nothing to recover */
8446 bp->recovery_state = BNX2X_RECOVERY_DONE;
8447 bp->is_leader = 0;
8448 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8449 smp_wmb();
8450
8451 return -EINVAL;
8452 }
8453
8454#ifdef BCM_CNIC
8455 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
8456#endif
8457 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
8458
8459 /* Set "drop all" */
8460 bp->rx_mode = BNX2X_RX_MODE_NONE;
8461 bnx2x_set_storm_rx_mode(bp);
8462
8463 /* Disable HW interrupts, NAPI and Tx */
8464 bnx2x_netif_stop(bp, 1);
8465
8466 del_timer_sync(&bp->timer);
8467 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
8468 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
8469 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8470
8471 /* Release IRQs */
8472 bnx2x_free_irq(bp, false);
8473
8474 /* Cleanup the chip if needed */
8475 if (unload_mode != UNLOAD_RECOVERY)
8476 bnx2x_chip_cleanup(bp, unload_mode);
8477
9a035440 8478 bp->port.pmf = 0;
a2fbb9ea 8479
7a9b2557 8480 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 8481 bnx2x_free_skbs(bp);
54b9ddaa 8482 for_each_queue(bp, i)
3196a88a 8483 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
54b9ddaa 8484 for_each_queue(bp, i)
7cde1c8b 8485 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
8486 bnx2x_free_mem(bp);
8487
8488 bp->state = BNX2X_STATE_CLOSED;
228241eb 8489
a2fbb9ea
ET
8490 netif_carrier_off(bp->dev);
8491
72fd0718
VZ
8492 /* The last driver must disable the "close the gate" functionality if
8493 * there is no parity attention or "process kill" pending.
8494 */
8495 if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
8496 bnx2x_reset_is_done(bp))
8497 bnx2x_disable_close_the_gate(bp);
8498
8499 /* Reset the MCP mailbox sequence if recovery is ongoing */
8500 if (unload_mode == UNLOAD_RECOVERY)
8501 bp->fw_seq = 0;
8502
8503 return 0;
8504}
8505
8506/* Close gates #2, #3 and #4: */
8507static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
8508{
8509 u32 val, addr;
8510
8511 /* Gates #2 and #4a are closed/opened for "not E1" only */
8512 if (!CHIP_IS_E1(bp)) {
8513 /* #4 */
8514 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
8515 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
8516 close ? (val | 0x1) : (val & (~(u32)1)));
8517 /* #2 */
8518 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
8519 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
8520 close ? (val | 0x1) : (val & (~(u32)1)));
8521 }
8522
8523 /* #3 */
8524 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
8525 val = REG_RD(bp, addr);
8526 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
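/* Note the inverted polarity for gate #3: bit 0 of HC_REG_CONFIG_x
 * apparently acts as an enable, so the gate is closed by clearing the
 * bit, while gates #2/#4 above are "discard" bits that are set to
 * close. */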
8527
8528 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
8529 close ? "closing" : "opening");
8530 mmiowb();
8531}
8532
8533#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
8534
8535static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
8536{
8537 /* Save the current `magic' bit value and set the bit in the CLP mailbox */
8538 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8539 *magic_val = val & SHARED_MF_CLP_MAGIC;
8540 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
8541}
8542
8543/* Restore the value of the `magic' bit.
8544 *
8545 * @param bp Driver handle.
8546 * @param magic_val Old value of the `magic' bit.
8547 */
8548static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
8549{
8550 /* Restore the `magic' bit value... */
8554 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8555 MF_CFG_WR(bp, shared_mf_config.clp_mb,
8556 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
8557}
8558
8559/* Prepares for MCP reset: takes care of CLP configurations.
8560 *
8561 * @param bp
8562 * @param magic_val Location where the old value of the 'magic' bit is saved.
8563 */
8564static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
8565{
8566 u32 shmem;
8567 u32 validity_offset;
8568
8569 DP(NETIF_MSG_HW, "Starting\n");
8570
8571 /* Set `magic' bit in order to save MF config */
8572 if (!CHIP_IS_E1(bp))
8573 bnx2x_clp_reset_prep(bp, magic_val);
8574
8575 /* Get shmem offset */
8576 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8577 validity_offset = offsetof(struct shmem_region, validity_map[0]);
8578
8579 /* Clear validity map flags */
8580 if (shmem > 0)
8581 REG_WR(bp, shmem + validity_offset, 0);
8582}
8583
8584#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
8585#define MCP_ONE_TIMEOUT 100 /* 100 ms */
8586
8587/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
8588 * depending on the HW type.
8589 *
8590 * @param bp
8591 */
8592static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
8593{
8594 /* special handling for emulation and FPGA,
8595 wait 10 times longer */
8596 if (CHIP_REV_IS_SLOW(bp))
8597 msleep(MCP_ONE_TIMEOUT*10);
8598 else
8599 msleep(MCP_ONE_TIMEOUT);
8600}
8601
8602static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
8603{
8604 u32 shmem, cnt, validity_offset, val;
8605 int rc = 0;
8606
8607 msleep(100);
8608
8609 /* Get shmem offset */
8610 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8611 if (shmem == 0) {
8612 BNX2X_ERR("Shmem base address is 0 - failure\n");
8613 rc = -ENOTTY;
8614 goto exit_lbl;
8615 }
8616
8617 validity_offset = offsetof(struct shmem_region, validity_map[0]);
8618
8619 /* Wait for MCP to come up */
8620 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
8621 /* TBD: it's best to check the validity map of the last port;
8622 * currently checks on port 0.
8623 */
8624 val = REG_RD(bp, shmem + validity_offset);
8625 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
8626 shmem + validity_offset, val);
8627
8628 /* check that shared memory is valid. */
8629 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8630 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8631 break;
8632
8633 bnx2x_mcp_wait_one(bp);
8634 }
8635
8636 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
8637
8638 /* Check that shared memory is valid. This indicates that MCP is up. */
8639 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
8640 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
8641 BNX2X_ERR("Shmem signature not present. MCP is not up!\n");
8642 rc = -ENOTTY;
8643 goto exit_lbl;
8644 }
8645
8646exit_lbl:
8647 /* Restore the `magic' bit value */
8648 if (!CHIP_IS_E1(bp))
8649 bnx2x_clp_reset_done(bp, magic_val);
8650
8651 return rc;
8652}
8653
8654static void bnx2x_pxp_prep(struct bnx2x *bp)
8655{
8656 if (!CHIP_IS_E1(bp)) {
8657 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
8658 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
8659 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
8660 mmiowb();
8661 }
8662}
8663
8664/*
8665 * Reset the whole chip except for:
8666 * - PCIE core
8667 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
8668 * one reset bit)
8669 * - IGU
8670 * - MISC (including AEU)
8671 * - GRC
8672 * - RBCN, RBCP
8673 */
8674static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
8675{
8676 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
8677
8678 not_reset_mask1 =
8679 MISC_REGISTERS_RESET_REG_1_RST_HC |
8680 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
8681 MISC_REGISTERS_RESET_REG_1_RST_PXP;
8682
8683 not_reset_mask2 =
8684 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
8685 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
8686 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
8687 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
8688 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
8689 MISC_REGISTERS_RESET_REG_2_RST_GRC |
8690 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
8691 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
8692
8693 reset_mask1 = 0xffffffff;
8694
8695 if (CHIP_IS_E1(bp))
8696 reset_mask2 = 0xffff;
8697 else
8698 reset_mask2 = 0x1ffff;
8699
8700 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8701 reset_mask1 & (~not_reset_mask1));
8702 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8703 reset_mask2 & (~not_reset_mask2));
8704
8705 barrier();
8706 mmiowb();
8707
8708 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
8709 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
8710 mmiowb();
8711}
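/* Editor's note on the SET/CLEAR pair used above (assumed semantics for
 * these MISC reset registers): writing 1s to the _CLEAR bank puts the
 * corresponding blocks into reset, and writing 1s to the _SET bank takes
 * them back out; the not_reset masks keep HC, PXP, the MCP/EMAC hard
 * cores, GRC, RBCN etc. untouched, per the exclusion list above. */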
8712
8713static int bnx2x_process_kill(struct bnx2x *bp)
8714{
8715 int cnt = 1000;
8716 u32 val = 0;
8717 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
8718
8719
8720 /* Empty the Tetris buffer, wait for 1s */
8721 do {
8722 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
8723 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
8724 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
8725 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
8726 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
8727 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
8728 ((port_is_idle_0 & 0x1) == 0x1) &&
8729 ((port_is_idle_1 & 0x1) == 0x1) &&
8730 (pgl_exp_rom2 == 0xffffffff))
8731 break;
8732 msleep(1);
8733 } while (cnt-- > 0);
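/* The constants polled above (sr_cnt 0x7e, blk_cnt 0xa0, the idle bits
 * and an all-ones PGL_EXP_ROM2) are the "pipeline drained" signature of
 * the PXP read path; they are taken verbatim from the loop, not derived
 * here. */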
8734
8735 if (cnt <= 0) {
8736 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
8737 " are still"
8738 " outstanding read requests after 1s!\n");
8739 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
8740 " port_is_idle_0=0x%08x,"
8741 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
8742 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
8743 pgl_exp_rom2);
8744 return -EAGAIN;
8745 }
8746
8747 barrier();
8748
8749 /* Close gates #2, #3 and #4 */
8750 bnx2x_set_234_gates(bp, true);
8751
8752 /* TBD: Indicate that "process kill" is in progress to MCP */
8753
8754 /* Clear "unprepared" bit */
8755 REG_WR(bp, MISC_REG_UNPREPARED, 0);
8756 barrier();
8757
8758 /* Make sure all is written to the chip before the reset */
8759 mmiowb();
8760
8761 /* Wait for 1ms to empty GLUE and PCI-E core queues,
8762 * PSWHST, GRC and PSWRD Tetris buffer.
8763 */
8764 msleep(1);
8765
8766 /* Prepare for chip reset: */
8767 /* MCP */
8768 bnx2x_reset_mcp_prep(bp, &val);
8769
8770 /* PXP */
8771 bnx2x_pxp_prep(bp);
8772 barrier();
8773
8774 /* reset the chip */
8775 bnx2x_process_kill_chip_reset(bp);
8776 barrier();
8777
8778 /* Recover after reset: */
8779 /* MCP */
8780 if (bnx2x_reset_mcp_comp(bp, val))
8781 return -EAGAIN;
8782
8783 /* PXP */
8784 bnx2x_pxp_prep(bp);
8785
8786 /* Open the gates #2, #3 and #4 */
8787 bnx2x_set_234_gates(bp, false);
8788
8789 /* TBD: IGU/AEU preparation - bring the AEU/IGU back to a
8790 * reset state, re-enable attentions. */
8791
a2fbb9ea
ET
8792 return 0;
8793}
8794
72fd0718
VZ
8795static int bnx2x_leader_reset(struct bnx2x *bp)
8796{
8797 int rc = 0;
8798 /* Try to recover after the failure */
8799 if (bnx2x_process_kill(bp)) {
8800 printk(KERN_ERR "%s: Something bad has happened! Aii!\n",
8801 bp->dev->name);
8802 rc = -EAGAIN;
8803 goto exit_leader_reset;
8804 }
8805
8806 /* Clear "reset is in progress" bit and update the driver state */
8807 bnx2x_set_reset_done(bp);
8808 bp->recovery_state = BNX2X_RECOVERY_DONE;
8809
8810exit_leader_reset:
8811 bp->is_leader = 0;
8812 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8813 smp_wmb();
8814 return rc;
8815}
8816
8817static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
8818
8819/* Assumption: runs under rtnl lock. This together with the fact
8820 * that it's called only from bnx2x_reset_task() ensure that it
8821 * will never be called when netif_running(bp->dev) is false.
8822 */
8823static void bnx2x_parity_recover(struct bnx2x *bp)
8824{
8825 DP(NETIF_MSG_HW, "Handling parity\n");
8826 while (1) {
8827 switch (bp->recovery_state) {
8828 case BNX2X_RECOVERY_INIT:
8829 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
8830 /* Try to get a LEADER_LOCK HW lock */
8831 if (bnx2x_trylock_hw_lock(bp,
8832 HW_LOCK_RESOURCE_RESERVED_08))
8833 bp->is_leader = 1;
8834
8835 /* Stop the driver */
8836 /* If the interface has been removed - return */
8837 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
8838 return;
8839
8840 bp->recovery_state = BNX2X_RECOVERY_WAIT;
8841 /* Ensure "is_leader" and "recovery_state"
8842 * update values are seen on other CPUs
8843 */
8844 smp_wmb();
8845 break;
8846
8847 case BNX2X_RECOVERY_WAIT:
8848 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
8849 if (bp->is_leader) {
8850 u32 load_counter = bnx2x_get_load_cnt(bp);
8851 if (load_counter) {
8852 /* Wait until all other functions are
8853 * down.
8854 */
8855 schedule_delayed_work(&bp->reset_task,
8856 HZ/10);
8857 return;
8858 } else {
8859 /* If all other functions are down -
8860 * try to bring the chip back to
8861 * normal. In any case it's an exit
8862 * point for a leader.
8863 */
8864 if (bnx2x_leader_reset(bp) ||
8865 bnx2x_nic_load(bp, LOAD_NORMAL)) {
8866 printk(KERN_ERR"%s: Recovery "
8867 "has failed. Power cycle is "
8868 "needed.\n", bp->dev->name);
8869 /* Disconnect this device */
8870 netif_device_detach(bp->dev);
8871 /* Block ifup for all functions
8872 * of this ASIC until
8873 * "process kill" or power
8874 * cycle.
8875 */
8876 bnx2x_set_reset_in_progress(bp);
8877 /* Shut down the power */
8878 bnx2x_set_power_state(bp,
8879 PCI_D3hot);
8880 return;
8881 }
8882
8883 return;
8884 }
8885 } else { /* non-leader */
8886 if (!bnx2x_reset_is_done(bp)) {
8887 /* Try to get a LEADER_LOCK HW lock as
8888 * long as a former leader may have
8889 * been unloaded by the user or
8890 * released leadership for another
8891 * reason.
8892 */
8893 if (bnx2x_trylock_hw_lock(bp,
8894 HW_LOCK_RESOURCE_RESERVED_08)) {
8895 /* I'm a leader now! Restart a
8896 * switch case.
8897 */
8898 bp->is_leader = 1;
8899 break;
8900 }
8901
8902 schedule_delayed_work(&bp->reset_task,
8903 HZ/10);
8904 return;
8905
8906 } else { /* A leader has completed
8907 * the "process kill". It's an exit
8908 * point for a non-leader.
8909 */
8910 bnx2x_nic_load(bp, LOAD_NORMAL);
8911 bp->recovery_state =
8912 BNX2X_RECOVERY_DONE;
8913 smp_wmb();
8914 return;
8915 }
8916 }
8917 default:
8918 return;
8919 }
8920 }
8921}
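/* Editor's sketch of the recovery state machine implemented above:
 *
 *   RECOVERY_INIT: every function unloads with UNLOAD_RECOVERY; whoever
 *                  grabs HW_LOCK_RESOURCE_RESERVED_08 becomes the leader.
 *   RECOVERY_WAIT: leader - reschedules the reset task every HZ/10 until
 *                  the global load counter hits zero, then runs
 *                  bnx2x_leader_reset() ("process kill") and reloads;
 *                  non-leader - waits for the leader to finish (or
 *                  inherits the lock) and reloads once the reset is done.
 *   RECOVERY_DONE: normal operation restored.
 */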
8922
8923 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
8924 * scheduled on a generic queue in order to prevent a deadlock.
8925 */
34f80b04
EG
8926static void bnx2x_reset_task(struct work_struct *work)
8927{
72fd0718 8928 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
34f80b04
EG
8929
8930#ifdef BNX2X_STOP_ON_ERROR
8931 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8932 " so reset not done to allow debug dump,\n"
72fd0718 8933 " you will need to reboot when done\n");
34f80b04
EG
8934 return;
8935#endif
8936
8937 rtnl_lock();
8938
8939 if (!netif_running(bp->dev))
8940 goto reset_task_exit;
8941
72fd0718
VZ
8942 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
8943 bnx2x_parity_recover(bp);
8944 else {
8945 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8946 bnx2x_nic_load(bp, LOAD_NORMAL);
8947 }
34f80b04
EG
8948
8949reset_task_exit:
8950 rtnl_unlock();
8951}
8952
a2fbb9ea
ET
8953/* end of nic load/unload */
8954
8955/* ethtool_ops */
8956
8957/*
8958 * Init service functions
8959 */
8960
f1ef27ef
EG
8961static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8962{
8963 switch (func) {
8964 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8965 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
8966 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
8967 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
8968 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
8969 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
8970 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
8971 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
8972 default:
8973 BNX2X_ERR("Unsupported function index: %d\n", func);
8974 return (u32)(-1);
8975 }
8976}
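/* The "pretend" register selected above makes subsequent GRC accesses by
 * this PCI function appear to come from another function: writing 0
 * pretends to be function 0, and writing the function's own index undoes
 * it, as bnx2x_undi_int_disable_e1h() below demonstrates. */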
8977
8978static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
8979{
8980 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
8981
8982 /* Flush all outstanding writes */
8983 mmiowb();
8984
8985 /* Pretend to be function 0 */
8986 REG_WR(bp, reg, 0);
8987 /* Flush the GRC transaction (in the chip) */
8988 new_val = REG_RD(bp, reg);
8989 if (new_val != 0) {
8990 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
8991 new_val);
8992 BUG();
8993 }
8994
8995 /* From now on we are in "like-E1" mode */
8996 bnx2x_int_disable(bp);
8997
8998 /* Flush all outstanding writes */
8999 mmiowb();
9000
9001 /* Restore the original function settings */
9002 REG_WR(bp, reg, orig_func);
9003 new_val = REG_RD(bp, reg);
9004 if (new_val != orig_func) {
9005 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
9006 orig_func, new_val);
9007 BUG();
9008 }
9009}
9010
9011static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
9012{
9013 if (CHIP_IS_E1H(bp))
9014 bnx2x_undi_int_disable_e1h(bp, func);
9015 else
9016 bnx2x_int_disable(bp);
9017}
9018
34f80b04
EG
9019static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
9020{
9021 u32 val;
9022
9023 /* Check if there is any driver already loaded */
9024 val = REG_RD(bp, MISC_REG_UNPREPARED);
9025 if (val == 0x1) {
9026 /* Check if it is the UNDI driver;
9027 * the UNDI driver initializes the CID offset for the normal doorbell to 0x7
9028 */
4a37fb66 9029 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
9030 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
9031 if (val == 0x7) {
9032 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 9033 /* save our func */
34f80b04 9034 int func = BP_FUNC(bp);
da5a662a
VZ
9035 u32 swap_en;
9036 u32 swap_val;
34f80b04 9037
b4661739
EG
9038 /* clear the UNDI indication */
9039 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
9040
34f80b04
EG
9041 BNX2X_DEV_INFO("UNDI is active! reset device\n");
9042
9043 /* try to unload UNDI on port 0 */
9044 bp->func = 0;
da5a662a
VZ
9045 bp->fw_seq =
9046 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9047 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 9048 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
9049
9050 /* if UNDI is loaded on the other port */
9051 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
9052
da5a662a
VZ
9053 /* send "DONE" for previous unload */
9054 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9055
9056 /* unload UNDI on port 1 */
34f80b04 9057 bp->func = 1;
da5a662a
VZ
9058 bp->fw_seq =
9059 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9060 DRV_MSG_SEQ_NUMBER_MASK);
9061 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9062
9063 bnx2x_fw_command(bp, reset_code);
34f80b04
EG
9064 }
9065
b4661739
EG
9066 /* now it's safe to release the lock */
9067 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9068
f1ef27ef 9069 bnx2x_undi_int_disable(bp, func);
da5a662a
VZ
9070
9071 /* close input traffic and wait for it */
9072 /* Do not rcv packets to BRB */
9073 REG_WR(bp,
9074 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
9075 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
9076 /* Do not direct rcv packets that are not for MCP to
9077 * the BRB */
9078 REG_WR(bp,
9079 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
9080 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
9081 /* clear AEU */
9082 REG_WR(bp,
9083 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
9084 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
9085 msleep(10);
9086
9087 /* save NIG port swap info */
9088 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
9089 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
9090 /* reset device */
9091 REG_WR(bp,
9092 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 9093 0xd3ffffff);
34f80b04
EG
9094 REG_WR(bp,
9095 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
9096 0x1403);
da5a662a
VZ
9097 /* take the NIG out of reset and restore swap values */
9098 REG_WR(bp,
9099 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
9100 MISC_REGISTERS_RESET_REG_1_RST_NIG);
9101 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
9102 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
9103
9104 /* send unload done to the MCP */
9105 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9106
9107 /* restore our func and fw_seq */
9108 bp->func = func;
9109 bp->fw_seq =
9110 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9111 DRV_MSG_SEQ_NUMBER_MASK);
b4661739
EG
9112
9113 } else
9114 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
9115 }
9116}
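/* Editor's background note (hedged): "UNDI" here refers to the pre-boot
 * PXE/UNDI network driver. If it left the device initialized (normal
 * doorbell CID offset == 0x7), the sequence above performs a controlled
 * takeover: negotiate the unload with the MCP for both ports if needed,
 * drain input traffic, reset the chip while preserving the NIG port-swap
 * straps, and finally report UNLOAD_DONE. */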
9117
9118static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
9119{
9120 u32 val, val2, val3, val4, id;
72ce58c3 9121 u16 pmc;
34f80b04
EG
9122
9123 /* Get the chip revision id and number. */
9124 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
9125 val = REG_RD(bp, MISC_REG_CHIP_NUM);
9126 id = ((val & 0xffff) << 16);
9127 val = REG_RD(bp, MISC_REG_CHIP_REV);
9128 id |= ((val & 0xf) << 12);
9129 val = REG_RD(bp, MISC_REG_CHIP_METAL);
9130 id |= ((val & 0xff) << 4);
5a40e08e 9131 val = REG_RD(bp, MISC_REG_BOND_ID);
34f80b04
EG
9132 id |= (val & 0xf);
9133 bp->common.chip_id = id;
9134 bp->link_params.chip_id = bp->common.chip_id;
9135 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
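/* Illustrative decode (values invented for the example): a chip_id of
 * 0x164e0001 splits into num 0x164e, rev 0x0, metal 0x00 and bond_id
 * 0x1, per the 16-31/12-15/4-11/0-3 layout noted above. */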
9136
1c06328c
EG
9137 val = (REG_RD(bp, 0x2874) & 0x55);
9138 if ((bp->common.chip_id & 0x1) ||
9139 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
9140 bp->flags |= ONE_PORT_FLAG;
9141 BNX2X_DEV_INFO("single port device\n");
9142 }
9143
34f80b04
EG
9144 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
9145 bp->common.flash_size = (NVRAM_1MB_SIZE <<
9146 (val & MCPR_NVM_CFG4_FLASH_SIZE));
9147 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
9148 bp->common.flash_size, bp->common.flash_size);
9149
9150 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
2691d51d 9151 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
34f80b04 9152 bp->link_params.shmem_base = bp->common.shmem_base;
2691d51d
EG
9153 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
9154 bp->common.shmem_base, bp->common.shmem2_base);
34f80b04
EG
9155
9156 if (!bp->common.shmem_base ||
9157 (bp->common.shmem_base < 0xA0000) ||
9158 (bp->common.shmem_base >= 0xC0000)) {
9159 BNX2X_DEV_INFO("MCP not active\n");
9160 bp->flags |= NO_MCP_FLAG;
9161 return;
9162 }
9163
9164 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9165 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9166 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
cdaa7cb8 9167 BNX2X_ERROR("BAD MCP validity signature\n");
34f80b04
EG
9168
9169 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 9170 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
34f80b04
EG
9171
9172 bp->link_params.hw_led_mode = ((bp->common.hw_config &
9173 SHARED_HW_CFG_LED_MODE_MASK) >>
9174 SHARED_HW_CFG_LED_MODE_SHIFT);
9175
c2c8b03e
EG
9176 bp->link_params.feature_config_flags = 0;
9177 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
9178 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
9179 bp->link_params.feature_config_flags |=
9180 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9181 else
9182 bp->link_params.feature_config_flags &=
9183 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9184
34f80b04
EG
9185 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
9186 bp->common.bc_ver = val;
9187 BNX2X_DEV_INFO("bc_ver %X\n", val);
9188 if (val < BNX2X_BC_VER) {
9189 /* for now only warn;
9190 * later we might need to enforce this */
cdaa7cb8
VZ
9191 BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
9192 "please upgrade BC\n", BNX2X_BC_VER, val);
34f80b04 9193 }
4d295db0
EG
9194 bp->link_params.feature_config_flags |=
9195 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
9196 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
72ce58c3
EG
9197
9198 if (BP_E1HVN(bp) == 0) {
9199 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
9200 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
9201 } else {
9202 /* no WOL capability for E1HVN != 0 */
9203 bp->flags |= NO_WOL_FLAG;
9204 }
9205 BNX2X_DEV_INFO("%sWoL capable\n",
f5372251 9206 (bp->flags & NO_WOL_FLAG) ? "not " : "");
34f80b04
EG
9207
9208 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
9209 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
9210 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
9211 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
9212
cdaa7cb8
VZ
9213 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
9214 val, val2, val3, val4);
34f80b04
EG
9215}
9216
9217static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
9218 u32 switch_cfg)
a2fbb9ea 9219{
34f80b04 9220 int port = BP_PORT(bp);
a2fbb9ea
ET
9221 u32 ext_phy_type;
9222
a2fbb9ea
ET
9223 switch (switch_cfg) {
9224 case SWITCH_CFG_1G:
9225 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
9226
c18487ee
YR
9227 ext_phy_type =
9228 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
9229 switch (ext_phy_type) {
9230 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
9231 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
9232 ext_phy_type);
9233
34f80b04
EG
9234 bp->port.supported |= (SUPPORTED_10baseT_Half |
9235 SUPPORTED_10baseT_Full |
9236 SUPPORTED_100baseT_Half |
9237 SUPPORTED_100baseT_Full |
9238 SUPPORTED_1000baseT_Full |
9239 SUPPORTED_2500baseX_Full |
9240 SUPPORTED_TP |
9241 SUPPORTED_FIBRE |
9242 SUPPORTED_Autoneg |
9243 SUPPORTED_Pause |
9244 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
9245 break;
9246
9247 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
9248 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
9249 ext_phy_type);
9250
34f80b04
EG
9251 bp->port.supported |= (SUPPORTED_10baseT_Half |
9252 SUPPORTED_10baseT_Full |
9253 SUPPORTED_100baseT_Half |
9254 SUPPORTED_100baseT_Full |
9255 SUPPORTED_1000baseT_Full |
9256 SUPPORTED_TP |
9257 SUPPORTED_FIBRE |
9258 SUPPORTED_Autoneg |
9259 SUPPORTED_Pause |
9260 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
9261 break;
9262
9263 default:
9264 BNX2X_ERR("NVRAM config error. "
9265 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 9266 bp->link_params.ext_phy_config);
a2fbb9ea
ET
9267 return;
9268 }
9269
34f80b04
EG
9270 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
9271 port*0x10);
9272 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
9273 break;
9274
9275 case SWITCH_CFG_10G:
9276 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
9277
c18487ee
YR
9278 ext_phy_type =
9279 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
9280 switch (ext_phy_type) {
9281 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
9282 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
9283 ext_phy_type);
9284
34f80b04
EG
9285 bp->port.supported |= (SUPPORTED_10baseT_Half |
9286 SUPPORTED_10baseT_Full |
9287 SUPPORTED_100baseT_Half |
9288 SUPPORTED_100baseT_Full |
9289 SUPPORTED_1000baseT_Full |
9290 SUPPORTED_2500baseX_Full |
9291 SUPPORTED_10000baseT_Full |
9292 SUPPORTED_TP |
9293 SUPPORTED_FIBRE |
9294 SUPPORTED_Autoneg |
9295 SUPPORTED_Pause |
9296 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
9297 break;
9298
589abe3a
EG
9299 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
9300 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
34f80b04 9301 ext_phy_type);
f1410647 9302
34f80b04 9303 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 9304 SUPPORTED_1000baseT_Full |
34f80b04 9305 SUPPORTED_FIBRE |
589abe3a 9306 SUPPORTED_Autoneg |
34f80b04
EG
9307 SUPPORTED_Pause |
9308 SUPPORTED_Asym_Pause);
f1410647
ET
9309 break;
9310
589abe3a
EG
9311 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9312 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
f1410647
ET
9313 ext_phy_type);
9314
34f80b04 9315 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 9316 SUPPORTED_2500baseX_Full |
34f80b04 9317 SUPPORTED_1000baseT_Full |
589abe3a
EG
9318 SUPPORTED_FIBRE |
9319 SUPPORTED_Autoneg |
9320 SUPPORTED_Pause |
9321 SUPPORTED_Asym_Pause);
9322 break;
9323
9324 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9325 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
9326 ext_phy_type);
9327
9328 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04
EG
9329 SUPPORTED_FIBRE |
9330 SUPPORTED_Pause |
9331 SUPPORTED_Asym_Pause);
f1410647
ET
9332 break;
9333
589abe3a
EG
9334 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9335 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
a2fbb9ea
ET
9336 ext_phy_type);
9337
34f80b04
EG
9338 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9339 SUPPORTED_1000baseT_Full |
9340 SUPPORTED_FIBRE |
34f80b04
EG
9341 SUPPORTED_Pause |
9342 SUPPORTED_Asym_Pause);
f1410647
ET
9343 break;
9344
589abe3a
EG
9345 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
9346 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
c18487ee
YR
9347 ext_phy_type);
9348
34f80b04 9349 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04 9350 SUPPORTED_1000baseT_Full |
34f80b04 9351 SUPPORTED_Autoneg |
589abe3a 9352 SUPPORTED_FIBRE |
34f80b04
EG
9353 SUPPORTED_Pause |
9354 SUPPORTED_Asym_Pause);
c18487ee
YR
9355 break;
9356
4d295db0
EG
9357 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
9358 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
9359 ext_phy_type);
9360
9361 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9362 SUPPORTED_1000baseT_Full |
9363 SUPPORTED_Autoneg |
9364 SUPPORTED_FIBRE |
9365 SUPPORTED_Pause |
9366 SUPPORTED_Asym_Pause);
9367 break;
9368
f1410647
ET
9369 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
9370 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
9371 ext_phy_type);
9372
34f80b04
EG
9373 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9374 SUPPORTED_TP |
9375 SUPPORTED_Autoneg |
9376 SUPPORTED_Pause |
9377 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
9378 break;
9379
28577185
EG
9380 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
9381 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
9382 ext_phy_type);
9383
9384 bp->port.supported |= (SUPPORTED_10baseT_Half |
9385 SUPPORTED_10baseT_Full |
9386 SUPPORTED_100baseT_Half |
9387 SUPPORTED_100baseT_Full |
9388 SUPPORTED_1000baseT_Full |
9389 SUPPORTED_10000baseT_Full |
9390 SUPPORTED_TP |
9391 SUPPORTED_Autoneg |
9392 SUPPORTED_Pause |
9393 SUPPORTED_Asym_Pause);
9394 break;
9395
c18487ee
YR
9396 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9397 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9398 bp->link_params.ext_phy_config);
9399 break;
9400
a2fbb9ea
ET
9401 default:
9402 BNX2X_ERR("NVRAM config error. "
9403 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 9404 bp->link_params.ext_phy_config);
a2fbb9ea
ET
9405 return;
9406 }
9407
34f80b04
EG
9408 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
9409 port*0x18);
9410 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 9411
a2fbb9ea
ET
9412 break;
9413
9414 default:
9415 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 9416 bp->port.link_config);
a2fbb9ea
ET
9417 return;
9418 }
34f80b04 9419 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea
ET
9420
9421 /* mask what we support according to speed_cap_mask */
c18487ee
YR
9422 if (!(bp->link_params.speed_cap_mask &
9423 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 9424 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 9425
c18487ee
YR
9426 if (!(bp->link_params.speed_cap_mask &
9427 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 9428 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 9429
c18487ee
YR
9430 if (!(bp->link_params.speed_cap_mask &
9431 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 9432 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 9433
c18487ee
YR
9434 if (!(bp->link_params.speed_cap_mask &
9435 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 9436 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 9437
c18487ee
YR
9438 if (!(bp->link_params.speed_cap_mask &
9439 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
9440 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
9441 SUPPORTED_1000baseT_Full);
a2fbb9ea 9442
c18487ee
YR
9443 if (!(bp->link_params.speed_cap_mask &
9444 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 9445 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 9446
c18487ee
YR
9447 if (!(bp->link_params.speed_cap_mask &
9448 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 9449 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 9450
34f80b04 9451 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
9452}
9453
34f80b04 9454static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 9455{
c18487ee 9456 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 9457
34f80b04 9458 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 9459 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 9460 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 9461 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 9462 bp->port.advertising = bp->port.supported;
a2fbb9ea 9463 } else {
c18487ee
YR
9464 u32 ext_phy_type =
9465 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9466
9467 if ((ext_phy_type ==
9468 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
9469 (ext_phy_type ==
9470 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 9471 /* force 10G, no AN */
c18487ee 9472 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 9473 bp->port.advertising =
a2fbb9ea
ET
9474 (ADVERTISED_10000baseT_Full |
9475 ADVERTISED_FIBRE);
9476 break;
9477 }
9478 BNX2X_ERR("NVRAM config error. "
9479 "Invalid link_config 0x%x"
9480 " Autoneg not supported\n",
34f80b04 9481 bp->port.link_config);
a2fbb9ea
ET
9482 return;
9483 }
9484 break;
9485
9486 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 9487 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 9488 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
9489 bp->port.advertising = (ADVERTISED_10baseT_Full |
9490 ADVERTISED_TP);
a2fbb9ea 9491 } else {
cdaa7cb8
VZ
9492 BNX2X_ERROR("NVRAM config error. "
9493 "Invalid link_config 0x%x"
9494 " speed_cap_mask 0x%x\n",
9495 bp->port.link_config,
9496 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
9497 return;
9498 }
9499 break;
9500
9501 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 9502 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
9503 bp->link_params.req_line_speed = SPEED_10;
9504 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
9505 bp->port.advertising = (ADVERTISED_10baseT_Half |
9506 ADVERTISED_TP);
a2fbb9ea 9507 } else {
cdaa7cb8
VZ
9508 BNX2X_ERROR("NVRAM config error. "
9509 "Invalid link_config 0x%x"
9510 " speed_cap_mask 0x%x\n",
9511 bp->port.link_config,
9512 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
9513 return;
9514 }
9515 break;
9516
9517 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 9518 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 9519 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
9520 bp->port.advertising = (ADVERTISED_100baseT_Full |
9521 ADVERTISED_TP);
a2fbb9ea 9522 } else {
cdaa7cb8
VZ
9523 BNX2X_ERROR("NVRAM config error. "
9524 "Invalid link_config 0x%x"
9525 " speed_cap_mask 0x%x\n",
9526 bp->port.link_config,
9527 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
9528 return;
9529 }
9530 break;
9531
9532 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 9533 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
9534 bp->link_params.req_line_speed = SPEED_100;
9535 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
9536 bp->port.advertising = (ADVERTISED_100baseT_Half |
9537 ADVERTISED_TP);
a2fbb9ea 9538 } else {
cdaa7cb8
VZ
9539 BNX2X_ERROR("NVRAM config error. "
9540 "Invalid link_config 0x%x"
9541 " speed_cap_mask 0x%x\n",
9542 bp->port.link_config,
9543 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
9544 return;
9545 }
9546 break;
9547
9548 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 9549 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 9550 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
9551 bp->port.advertising = (ADVERTISED_1000baseT_Full |
9552 ADVERTISED_TP);
a2fbb9ea 9553 } else {
cdaa7cb8
VZ
9554 BNX2X_ERROR("NVRAM config error. "
9555 "Invalid link_config 0x%x"
9556 " speed_cap_mask 0x%x\n",
9557 bp->port.link_config,
9558 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
9559 return;
9560 }
9561 break;
9562
9563 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 9564 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 9565 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
9566 bp->port.advertising = (ADVERTISED_2500baseX_Full |
9567 ADVERTISED_TP);
a2fbb9ea 9568 } else {
cdaa7cb8
VZ
9569 BNX2X_ERROR("NVRAM config error. "
9570 "Invalid link_config 0x%x"
9571 " speed_cap_mask 0x%x\n",
9572 bp->port.link_config,
9573 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
9574 return;
9575 }
9576 break;
9577
9578 case PORT_FEATURE_LINK_SPEED_10G_CX4:
9579 case PORT_FEATURE_LINK_SPEED_10G_KX4:
9580 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 9581 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 9582 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
9583 bp->port.advertising = (ADVERTISED_10000baseT_Full |
9584 ADVERTISED_FIBRE);
a2fbb9ea 9585 } else {
cdaa7cb8
VZ
9586 BNX2X_ERROR("NVRAM config error. "
9587 "Invalid link_config 0x%x"
9588 " speed_cap_mask 0x%x\n",
9589 bp->port.link_config,
9590 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
9591 return;
9592 }
9593 break;
9594
9595 default:
cdaa7cb8
VZ
9596 BNX2X_ERROR("NVRAM config error. "
9597 "BAD link speed link_config 0x%x\n",
9598 bp->port.link_config);
c18487ee 9599 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 9600 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
9601 break;
9602 }
a2fbb9ea 9603
34f80b04
EG
9604 bp->link_params.req_flow_ctrl = (bp->port.link_config &
9605 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 9606 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 9607 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 9608 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 9609
c18487ee 9610 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 9611 " advertising 0x%x\n",
c18487ee
YR
9612 bp->link_params.req_line_speed,
9613 bp->link_params.req_duplex,
34f80b04 9614 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
9615}
9616
e665bfda
MC
9617static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
9618{
9619 mac_hi = cpu_to_be16(mac_hi);
9620 mac_lo = cpu_to_be32(mac_lo);
9621 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
9622 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
9623}
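/* Illustrative example: mac_hi = 0x0010 with mac_lo = 0x18010203 yields
 * mac_buf 00:10:18:01:02:03 - both shmem words are converted to
 * big-endian above so the buffer reads as a conventional MAC address. */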
9624
34f80b04 9625static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 9626{
34f80b04
EG
9627 int port = BP_PORT(bp);
9628 u32 val, val2;
589abe3a 9629 u32 config;
c2c8b03e 9630 u16 i;
01cd4528 9631 u32 ext_phy_type;
a2fbb9ea 9632
c18487ee 9633 bp->link_params.bp = bp;
34f80b04 9634 bp->link_params.port = port;
c18487ee 9635
c18487ee 9636 bp->link_params.lane_config =
a2fbb9ea 9637 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 9638 bp->link_params.ext_phy_config =
a2fbb9ea
ET
9639 SHMEM_RD(bp,
9640 dev_info.port_hw_config[port].external_phy_config);
4d295db0
EG
9641 /* BCM8727_NOC => BCM8727 no over current */
9642 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9643 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
9644 bp->link_params.ext_phy_config &=
9645 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
9646 bp->link_params.ext_phy_config |=
9647 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
9648 bp->link_params.feature_config_flags |=
9649 FEATURE_CONFIG_BCM8727_NOC;
9650 }
9651
c18487ee 9652 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
9653 SHMEM_RD(bp,
9654 dev_info.port_hw_config[port].speed_capability_mask);
9655
34f80b04 9656 bp->port.link_config =
a2fbb9ea
ET
9657 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
9658
c2c8b03e
EG
9659 /* Get the 4 lanes xgxs config rx and tx */
9660 for (i = 0; i < 2; i++) {
9661 val = SHMEM_RD(bp,
9662 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
9663 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
9664 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
9665
9666 val = SHMEM_RD(bp,
9667 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
9668 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
9669 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
9670 }
9671
3ce2c3f9
EG
9672 /* If the device is capable of WoL, set the default state according
9673 * to the HW
9674 */
4d295db0 9675 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
3ce2c3f9
EG
9676 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
9677 (config & PORT_FEATURE_WOL_ENABLED));
9678
c2c8b03e
EG
9679 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
9680 " speed_cap_mask 0x%08x link_config 0x%08x\n",
c18487ee
YR
9681 bp->link_params.lane_config,
9682 bp->link_params.ext_phy_config,
34f80b04 9683 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 9684
4d295db0
EG
9685 bp->link_params.switch_cfg |= (bp->port.link_config &
9686 PORT_FEATURE_CONNECTED_SWITCH_MASK);
c18487ee 9687 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
9688
9689 bnx2x_link_settings_requested(bp);
9690
01cd4528
EG
9691 /*
9692 * If connected directly, work with the internal PHY; otherwise, work
9693 * with the external PHY
9694 */
9695 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9696 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
9697 bp->mdio.prtad = bp->link_params.phy_addr;
9698
9699 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
9700 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
9701 bp->mdio.prtad =
659bc5c4 9702 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
01cd4528 9703
a2fbb9ea
ET
9704 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
9705 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
e665bfda 9706 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
c18487ee
YR
9707 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
9708 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
37b091ba
MC
9709
9710#ifdef BCM_CNIC
9711 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
9712 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
9713 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
9714#endif
34f80b04
EG
9715}
9716
9717static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
9718{
9719 int func = BP_FUNC(bp);
9720 u32 val, val2;
9721 int rc = 0;
a2fbb9ea 9722
34f80b04 9723 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 9724
34f80b04
EG
9725 bp->e1hov = 0;
9726 bp->e1hmf = 0;
2145a920 9727 if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
34f80b04
EG
9728 bp->mf_config =
9729 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 9730
2691d51d 9731 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
3196a88a 9732 FUNC_MF_CFG_E1HOV_TAG_MASK);
2691d51d 9733 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
34f80b04 9734 bp->e1hmf = 1;
2691d51d
EG
9735 BNX2X_DEV_INFO("%s function mode\n",
9736 IS_E1HMF(bp) ? "multi" : "single");
9737
9738 if (IS_E1HMF(bp)) {
9739 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
9740 e1hov_tag) &
9741 FUNC_MF_CFG_E1HOV_TAG_MASK);
9742 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
9743 bp->e1hov = val;
9744 BNX2X_DEV_INFO("E1HOV for func %d is %d "
9745 "(0x%04x)\n",
9746 func, bp->e1hov, bp->e1hov);
9747 } else {
cdaa7cb8
VZ
9748 BNX2X_ERROR("No valid E1HOV for func %d,"
9749 " aborting\n", func);
34f80b04
EG
9750 rc = -EPERM;
9751 }
2691d51d
EG
9752 } else {
9753 if (BP_E1HVN(bp)) {
cdaa7cb8
VZ
9754 BNX2X_ERROR("VN %d in single function mode,"
9755 " aborting\n", BP_E1HVN(bp));
2691d51d
EG
9756 rc = -EPERM;
9757 }
34f80b04
EG
9758 }
9759 }
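/* Editor's note: E1HOV is the outer VLAN tag assigned to a function in
 * multi-function (E1HMF) mode; the -EPERM paths above reject
 * configurations this driver cannot operate in (no valid tag in MF mode,
 * or a non-zero VN in single-function mode). */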
a2fbb9ea 9760
34f80b04
EG
9761 if (!BP_NOMCP(bp)) {
9762 bnx2x_get_port_hwinfo(bp);
9763
9764 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
9765 DRV_MSG_SEQ_NUMBER_MASK);
9766 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9767 }
9768
9769 if (IS_E1HMF(bp)) {
9770 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
9771 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
9772 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
9773 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
9774 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
9775 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
9776 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
9777 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
9778 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
9779 bp->dev->dev_addr[5] = (u8)(val & 0xff);
9780 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
9781 ETH_ALEN);
9782 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
9783 ETH_ALEN);
a2fbb9ea 9784 }
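/* Illustrative example: mac_upper = 0x0010 and mac_lower = 0x18010203
 * produce dev_addr 00:10:18:01:02:03 - the same layout that
 * bnx2x_set_mac_buf() builds for the port MAC. */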
34f80b04
EG
9785
9786 return rc;
a2fbb9ea
ET
9787 }
9788
34f80b04
EG
9789 if (BP_NOMCP(bp)) {
9790 /* only supposed to happen on emulation/FPGA */
cdaa7cb8 9791 BNX2X_ERROR("warning: random MAC workaround active\n");
34f80b04
EG
9792 random_ether_addr(bp->dev->dev_addr);
9793 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
9794 }
a2fbb9ea 9795
34f80b04
EG
9796 return rc;
9797}
9798
34f24c7f
VZ
9799static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
9800{
9801 int cnt, i, block_end, rodi;
9802 char vpd_data[BNX2X_VPD_LEN+1];
9803 char str_id_reg[VENDOR_ID_LEN+1];
9804 char str_id_cap[VENDOR_ID_LEN+1];
9805 u8 len;
9806
9807 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
9808 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
9809
9810 if (cnt < BNX2X_VPD_LEN)
9811 goto out_not_found;
9812
9813 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
9814 PCI_VPD_LRDT_RO_DATA);
9815 if (i < 0)
9816 goto out_not_found;
9817
9818
9819 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
9820 pci_vpd_lrdt_size(&vpd_data[i]);
9821
9822 i += PCI_VPD_LRDT_TAG_SIZE;
9823
9824 if (block_end > BNX2X_VPD_LEN)
9825 goto out_not_found;
9826
9827 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9828 PCI_VPD_RO_KEYWORD_MFR_ID);
9829 if (rodi < 0)
9830 goto out_not_found;
9831
9832 len = pci_vpd_info_field_size(&vpd_data[rodi]);
9833
9834 if (len != VENDOR_ID_LEN)
9835 goto out_not_found;
9836
9837 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9838
9839 /* vendor specific info */
9840 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
9841 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
9842 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
9843 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
9844
9845 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9846 PCI_VPD_RO_KEYWORD_VENDOR0);
9847 if (rodi >= 0) {
9848 len = pci_vpd_info_field_size(&vpd_data[rodi]);
9849
9850 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9851
9852 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
9853 memcpy(bp->fw_ver, &vpd_data[rodi], len);
9854 bp->fw_ver[len] = ' ';
9855 }
9856 }
9857 return;
9858 }
9859out_not_found:
9860 return;
9861}
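/* Editor's summary of the VPD walk above: find the read-only (RO) large
 * resource tag, locate the MFR_ID keyword, and only for Dell-branded
 * boards copy the VENDOR0 string into bp->fw_ver (presumably surfaced
 * later through the driver's version reporting); any parse failure
 * silently leaves fw_ver zeroed. */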
9862
34f80b04
EG
9863static int __devinit bnx2x_init_bp(struct bnx2x *bp)
9864{
9865 int func = BP_FUNC(bp);
87942b46 9866 int timer_interval;
34f80b04
EG
9867 int rc;
9868
da5a662a
VZ
9869 /* Disable interrupt handling until HW is initialized */
9870 atomic_set(&bp->intr_sem, 1);
e1510706 9871 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
da5a662a 9872
34f80b04 9873 mutex_init(&bp->port.phy_mutex);
c4ff7cbf 9874 mutex_init(&bp->fw_mb_mutex);
993ac7b5
MC
9875#ifdef BCM_CNIC
9876 mutex_init(&bp->cnic_mutex);
9877#endif
a2fbb9ea 9878
1cf167f2 9879 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
72fd0718 9880 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
34f80b04
EG
9881
9882 rc = bnx2x_get_hwinfo(bp);
9883
34f24c7f 9884 bnx2x_read_fwinfo(bp);
34f80b04
EG
9885 /* need to reset chip if undi was active */
9886 if (!BP_NOMCP(bp))
9887 bnx2x_undi_unload(bp);
9888
9889 if (CHIP_REV_IS_FPGA(bp))
cdaa7cb8 9890 dev_err(&bp->pdev->dev, "FPGA detected\n");
34f80b04
EG
9891
9892 if (BP_NOMCP(bp) && (func == 0))
cdaa7cb8
VZ
9893 dev_err(&bp->pdev->dev, "MCP disabled, "
9894 "must load devices in order!\n");
34f80b04 9895
555f6c78 9896 /* Set multi queue mode */
8badd27a
EG
9897 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
9898 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
cdaa7cb8
VZ
9899 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
9900 "requested is not MSI-X\n");
555f6c78
EG
9901 multi_mode = ETH_RSS_MODE_DISABLED;
9902 }
9903 bp->multi_mode = multi_mode;
9904
9905
4fd89b7a
DK
9906 bp->dev->features |= NETIF_F_GRO;
9907
7a9b2557
VZ
9908 /* Set TPA flags */
9909 if (disable_tpa) {
9910 bp->flags &= ~TPA_ENABLE_FLAG;
9911 bp->dev->features &= ~NETIF_F_LRO;
9912 } else {
9913 bp->flags |= TPA_ENABLE_FLAG;
9914 bp->dev->features |= NETIF_F_LRO;
9915 }
9916
a18f5128
EG
9917 if (CHIP_IS_E1(bp))
9918 bp->dropless_fc = 0;
9919 else
9920 bp->dropless_fc = dropless_fc;
9921
8d5726c4 9922 bp->mrrs = mrrs;
7a9b2557 9923
34f80b04
EG
9924 bp->tx_ring_size = MAX_TX_AVAIL;
9925 bp->rx_ring_size = MAX_RX_AVAIL;
9926
9927 bp->rx_csum = 1;
34f80b04 9928
7d323bfd
EG
9929 /* make sure that the numbers are in the right granularity */
9930 bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
9931 bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
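/* Worked example (BNX2X_BTR's real value lives in bnx2x.h; assume 4
 * purely for illustration): the granularity is then 4*4 = 16, so 50
 * rounds down to 48 tx ticks and 25 down to 16 rx ticks. */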
34f80b04 9932
87942b46
EG
9933 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
9934 bp->current_interval = (poll ? poll : timer_interval);
34f80b04
EG
9935
9936 init_timer(&bp->timer);
9937 bp->timer.expires = jiffies + bp->current_interval;
9938 bp->timer.data = (unsigned long) bp;
9939 bp->timer.function = bnx2x_timer;
9940
9941 return rc;
a2fbb9ea
ET
9942}
9943
9944/*
9945 * ethtool service functions
9946 */
9947
9948/* All ethtool functions called with rtnl_lock */
9949
9950static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9951{
9952 struct bnx2x *bp = netdev_priv(dev);
9953
34f80b04
EG
9954 cmd->supported = bp->port.supported;
9955 cmd->advertising = bp->port.advertising;
a2fbb9ea 9956
f34d28ea
EG
9957 if ((bp->state == BNX2X_STATE_OPEN) &&
9958 !(bp->flags & MF_FUNC_DIS) &&
9959 (bp->link_vars.link_up)) {
c18487ee
YR
9960 cmd->speed = bp->link_vars.line_speed;
9961 cmd->duplex = bp->link_vars.duplex;
b015e3d1
EG
9962 if (IS_E1HMF(bp)) {
9963 u16 vn_max_rate;
34f80b04 9964
b015e3d1
EG
9965 vn_max_rate =
9966 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
34f80b04 9967 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
b015e3d1
EG
9968 if (vn_max_rate < cmd->speed)
9969 cmd->speed = vn_max_rate;
9970 }
9971 } else {
9972 cmd->speed = -1;
9973 cmd->duplex = -1;
34f80b04 9974 }
a2fbb9ea 9975
c18487ee
YR
9976 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
9977 u32 ext_phy_type =
9978 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
9979
9980 switch (ext_phy_type) {
9981 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
f1410647 9982 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 9983 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
589abe3a
EG
9984 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9985 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9986 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4d295db0 9987 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
f1410647
ET
9988 cmd->port = PORT_FIBRE;
9989 break;
9990
9991 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
28577185 9992 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
f1410647
ET
9993 cmd->port = PORT_TP;
9994 break;
9995
c18487ee
YR
9996 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9997 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9998 bp->link_params.ext_phy_config);
9999 break;
10000
f1410647
ET
10001 default:
10002 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
10003 bp->link_params.ext_phy_config);
10004 break;
f1410647
ET
10005 }
10006 } else
a2fbb9ea 10007 cmd->port = PORT_TP;
a2fbb9ea 10008
01cd4528 10009 cmd->phy_address = bp->mdio.prtad;
a2fbb9ea
ET
10010 cmd->transceiver = XCVR_INTERNAL;
10011
c18487ee 10012 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 10013 cmd->autoneg = AUTONEG_ENABLE;
f1410647 10014 else
a2fbb9ea 10015 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
10016
10017 cmd->maxtxpkt = 0;
10018 cmd->maxrxpkt = 0;
10019
10020 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
10021 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
10022 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
10023 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
10024 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
10025 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
10026 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
10027
10028 return 0;
10029}
10030
10031static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10032{
10033 struct bnx2x *bp = netdev_priv(dev);
10034 u32 advertising;
10035
34f80b04
EG
10036 if (IS_E1HMF(bp))
10037 return 0;
10038
a2fbb9ea
ET
10039 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
10040 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
10041 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
10042 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
10043 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
10044 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
10045 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
10046
a2fbb9ea 10047 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
10048 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
10049 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 10050 return -EINVAL;
f1410647 10051 }
a2fbb9ea
ET
10052
10053 /* advertise the requested speed and duplex if supported */
34f80b04 10054 cmd->advertising &= bp->port.supported;
a2fbb9ea 10055
c18487ee
YR
10056 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
10057 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
10058 bp->port.advertising |= (ADVERTISED_Autoneg |
10059 cmd->advertising);
a2fbb9ea
ET
10060
10061 } else { /* forced speed */
10062 /* advertise the requested speed and duplex if supported */
10063 switch (cmd->speed) {
10064 case SPEED_10:
10065 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 10066 if (!(bp->port.supported &
f1410647
ET
10067 SUPPORTED_10baseT_Full)) {
10068 DP(NETIF_MSG_LINK,
10069 "10M full not supported\n");
a2fbb9ea 10070 return -EINVAL;
f1410647 10071 }
a2fbb9ea
ET
10072
10073 advertising = (ADVERTISED_10baseT_Full |
10074 ADVERTISED_TP);
10075 } else {
34f80b04 10076 if (!(bp->port.supported &
f1410647
ET
10077 SUPPORTED_10baseT_Half)) {
10078 DP(NETIF_MSG_LINK,
10079 "10M half not supported\n");
a2fbb9ea 10080 return -EINVAL;
f1410647 10081 }
a2fbb9ea
ET
10082
10083 advertising = (ADVERTISED_10baseT_Half |
10084 ADVERTISED_TP);
10085 }
10086 break;
10087
10088 case SPEED_100:
10089 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 10090 if (!(bp->port.supported &
f1410647
ET
10091 SUPPORTED_100baseT_Full)) {
10092 DP(NETIF_MSG_LINK,
10093 "100M full not supported\n");
a2fbb9ea 10094 return -EINVAL;
f1410647 10095 }
a2fbb9ea
ET
10096
10097 advertising = (ADVERTISED_100baseT_Full |
10098 ADVERTISED_TP);
10099 } else {
34f80b04 10100 if (!(bp->port.supported &
f1410647
ET
10101 SUPPORTED_100baseT_Half)) {
10102 DP(NETIF_MSG_LINK,
10103 "100M half not supported\n");
a2fbb9ea 10104 return -EINVAL;
f1410647 10105 }
a2fbb9ea
ET
10106
10107 advertising = (ADVERTISED_100baseT_Half |
10108 ADVERTISED_TP);
10109 }
10110 break;
10111
10112 case SPEED_1000:
f1410647
ET
10113 if (cmd->duplex != DUPLEX_FULL) {
10114 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 10115 return -EINVAL;
f1410647 10116 }
a2fbb9ea 10117
34f80b04 10118 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 10119 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 10120 return -EINVAL;
f1410647 10121 }
a2fbb9ea
ET
10122
10123 advertising = (ADVERTISED_1000baseT_Full |
10124 ADVERTISED_TP);
10125 break;
10126
10127 case SPEED_2500:
f1410647
ET
10128 if (cmd->duplex != DUPLEX_FULL) {
10129 DP(NETIF_MSG_LINK,
10130 "2.5G half not supported\n");
a2fbb9ea 10131 return -EINVAL;
f1410647 10132 }
a2fbb9ea 10133
34f80b04 10134 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
f1410647
ET
10135 DP(NETIF_MSG_LINK,
10136 "2.5G full not supported\n");
a2fbb9ea 10137 return -EINVAL;
f1410647 10138 }
a2fbb9ea 10139
f1410647 10140 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
10141 ADVERTISED_TP);
10142 break;
10143
10144 case SPEED_10000:
f1410647
ET
10145 if (cmd->duplex != DUPLEX_FULL) {
10146 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 10147 return -EINVAL;
f1410647 10148 }
a2fbb9ea 10149
34f80b04 10150 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 10151 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 10152 return -EINVAL;
f1410647 10153 }
a2fbb9ea
ET
10154
10155 advertising = (ADVERTISED_10000baseT_Full |
10156 ADVERTISED_FIBRE);
10157 break;
10158
10159 default:
f1410647 10160 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
10161 return -EINVAL;
10162 }
10163
c18487ee
YR
10164 bp->link_params.req_line_speed = cmd->speed;
10165 bp->link_params.req_duplex = cmd->duplex;
34f80b04 10166 bp->port.advertising = advertising;
a2fbb9ea
ET
10167 }
10168
c18487ee 10169 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 10170 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 10171 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 10172 bp->port.advertising);
a2fbb9ea 10173
34f80b04 10174 if (netif_running(dev)) {
bb2a0f7a 10175 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
10176 bnx2x_link_set(bp);
10177 }
a2fbb9ea
ET
10178
10179 return 0;
10180}
10181
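The forced-speed branch above repeats one pattern per speed: check the matching SUPPORTED_* capability bit, then derive the ADVERTISED_* mask (or fail with -EINVAL). A minimal userspace sketch of that accept/reject logic, table-driven rather than switch-based; the bit values mirror linux/ethtool.h, everything else here is illustrative and not the driver's code:

#include <stdint.h>
#include <stdio.h>

/* A few SUPPORTED_*/ADVERTISED_* bits; in the ethtool ABI the two sets
 * share the same values, so one set of defines serves for both. */
#define BIT_10baseT_Half	(1u << 0)
#define BIT_10baseT_Full	(1u << 1)
#define BIT_100baseT_Half	(1u << 2)
#define BIT_100baseT_Full	(1u << 3)
#define BIT_1000baseT_Full	(1u << 5)

struct row { int speed, full_duplex; uint32_t bit; };

static const struct row rows[] = {
	{ 10,   0, BIT_10baseT_Half },
	{ 10,   1, BIT_10baseT_Full },
	{ 100,  0, BIT_100baseT_Half },
	{ 100,  1, BIT_100baseT_Full },
	{ 1000, 1, BIT_1000baseT_Full },	/* 1G half is never legal */
};

/* Return the advertising mask for a forced-speed request, or 0 where the
 * driver would return -EINVAL (unsupported or unknown combination). */
static uint32_t forced_advertising(uint32_t supported, int speed, int fd)
{
	for (size_t i = 0; i < sizeof(rows)/sizeof(rows[0]); i++)
		if (rows[i].speed == speed && rows[i].full_duplex == fd)
			return (supported & rows[i].bit) ? rows[i].bit : 0;
	return 0;
}

int main(void)
{
	uint32_t supported = BIT_100baseT_Full | BIT_1000baseT_Full;

	printf("100/full -> 0x%x\n", forced_advertising(supported, 100, 1));
	printf("10/half  -> 0x%x\n", forced_advertising(supported, 10, 0));
	return 0;
}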
0a64ea57
EG
10182#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
10183#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
10184
10185static int bnx2x_get_regs_len(struct net_device *dev)
10186{
0a64ea57 10187 struct bnx2x *bp = netdev_priv(dev);
0d28e49a 10188 int regdump_len = 0;
0a64ea57
EG
10189 int i;
10190
0a64ea57
EG
10191 if (CHIP_IS_E1(bp)) {
10192 for (i = 0; i < REGS_COUNT; i++)
10193 if (IS_E1_ONLINE(reg_addrs[i].info))
10194 regdump_len += reg_addrs[i].size;
10195
10196 for (i = 0; i < WREGS_COUNT_E1; i++)
10197 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
10198 regdump_len += wreg_addrs_e1[i].size *
10199 (1 + wreg_addrs_e1[i].read_regs_count);
10200
10201 } else { /* E1H */
10202 for (i = 0; i < REGS_COUNT; i++)
10203 if (IS_E1H_ONLINE(reg_addrs[i].info))
10204 regdump_len += reg_addrs[i].size;
10205
10206 for (i = 0; i < WREGS_COUNT_E1H; i++)
10207 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
10208 regdump_len += wreg_addrs_e1h[i].size *
10209 (1 + wreg_addrs_e1h[i].read_regs_count);
10210 }
10211 regdump_len *= 4;
10212 regdump_len += sizeof(struct dump_hdr);
10213
10214 return regdump_len;
10215}
10216
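The length computed above counts 32-bit words: a plain register block contributes size words, a wide-bus block contributes size * (1 + read_regs_count) words, and the total is scaled to bytes before the dump header is added. A standalone check of that arithmetic with made-up table entries (the struct names and numbers are hypothetical stand-ins for reg_addrs/wreg_addrs_e1 and dump_hdr):

#include <stdio.h>

struct reg_block  { unsigned size; };			/* plain registers */
struct wreg_block { unsigned size, read_regs_count; };	/* wide-bus */

int main(void)
{
	/* hypothetical dump layout: two plain blocks, one wide-bus block */
	struct reg_block  regs[]  = { { 16 }, { 8 } };
	struct wreg_block wregs[] = { { 4, 3 } };
	unsigned hdr_bytes = 32;	/* stand-in for sizeof(struct dump_hdr) */
	unsigned words = 0, i;

	for (i = 0; i < 2; i++)
		words += regs[i].size;
	for (i = 0; i < 1; i++)		/* each wide-bus read returns the
					 * register plus its side reads */
		words += wregs[i].size * (1 + wregs[i].read_regs_count);

	printf("%u bytes\n", words * 4 + hdr_bytes);	/* (16+8+16)*4+32 = 192 */
	return 0;
}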
10217static void bnx2x_get_regs(struct net_device *dev,
10218 struct ethtool_regs *regs, void *_p)
10219{
10220 u32 *p = _p, i, j;
10221 struct bnx2x *bp = netdev_priv(dev);
10222 struct dump_hdr dump_hdr = {0};
10223
10224 regs->version = 0;
10225 memset(p, 0, regs->len);
10226
10227 if (!netif_running(bp->dev))
10228 return;
10229
10230 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
10231 dump_hdr.dump_sign = dump_sign_all;
10232 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
10233 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
10234 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
10235 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
10236 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
10237
10238 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
10239 p += dump_hdr.hdr_size + 1;
10240
10241 if (CHIP_IS_E1(bp)) {
10242 for (i = 0; i < REGS_COUNT; i++)
10243 if (IS_E1_ONLINE(reg_addrs[i].info))
10244 for (j = 0; j < reg_addrs[i].size; j++)
10245 *p++ = REG_RD(bp,
10246 reg_addrs[i].addr + j*4);
10247
10248 } else { /* E1H */
10249 for (i = 0; i < REGS_COUNT; i++)
10250 if (IS_E1H_ONLINE(reg_addrs[i].info))
10251 for (j = 0; j < reg_addrs[i].size; j++)
10252 *p++ = REG_RD(bp,
10253 reg_addrs[i].addr + j*4);
10254 }
10255}
10256
0d28e49a
EG
10257#define PHY_FW_VER_LEN 10
10258
10259static void bnx2x_get_drvinfo(struct net_device *dev,
10260 struct ethtool_drvinfo *info)
10261{
10262 struct bnx2x *bp = netdev_priv(dev);
10263 u8 phy_fw_ver[PHY_FW_VER_LEN];
10264
10265 strcpy(info->driver, DRV_MODULE_NAME);
10266 strcpy(info->version, DRV_MODULE_VERSION);
10267
10268 phy_fw_ver[0] = '\0';
10269 if (bp->port.pmf) {
10270 bnx2x_acquire_phy_lock(bp);
10271 bnx2x_get_ext_phy_fw_version(&bp->link_params,
10272 (bp->state != BNX2X_STATE_CLOSED),
10273 phy_fw_ver, PHY_FW_VER_LEN);
10274 bnx2x_release_phy_lock(bp);
10275 }
10276
34f24c7f
VZ
10277 strncpy(info->fw_version, bp->fw_ver, 32);
10278 snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
10279 "bc %d.%d.%d%s%s",
0d28e49a
EG
10280 (bp->common.bc_ver & 0xff0000) >> 16,
10281 (bp->common.bc_ver & 0xff00) >> 8,
10282 (bp->common.bc_ver & 0xff),
34f24c7f 10283 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
0d28e49a
EG
10284 strcpy(info->bus_info, pci_name(bp->pdev));
10285 info->n_stats = BNX2X_NUM_STATS;
10286 info->testinfo_len = BNX2X_NUM_TESTS;
10287 info->eedump_len = bp->common.flash_size;
10288 info->regdump_len = bnx2x_get_regs_len(dev);
10289}
10290
a2fbb9ea
ET
10291static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10292{
10293 struct bnx2x *bp = netdev_priv(dev);
10294
10295 if (bp->flags & NO_WOL_FLAG) {
10296 wol->supported = 0;
10297 wol->wolopts = 0;
10298 } else {
10299 wol->supported = WAKE_MAGIC;
10300 if (bp->wol)
10301 wol->wolopts = WAKE_MAGIC;
10302 else
10303 wol->wolopts = 0;
10304 }
10305 memset(&wol->sopass, 0, sizeof(wol->sopass));
10306}
10307
10308static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10309{
10310 struct bnx2x *bp = netdev_priv(dev);
10311
10312 if (wol->wolopts & ~WAKE_MAGIC)
10313 return -EINVAL;
10314
10315 if (wol->wolopts & WAKE_MAGIC) {
10316 if (bp->flags & NO_WOL_FLAG)
10317 return -EINVAL;
10318
10319 bp->wol = 1;
34f80b04 10320 } else
a2fbb9ea 10321 bp->wol = 0;
34f80b04 10322
a2fbb9ea
ET
10323 return 0;
10324}
10325
10326static u32 bnx2x_get_msglevel(struct net_device *dev)
10327{
10328 struct bnx2x *bp = netdev_priv(dev);
10329
7995c64e 10330 return bp->msg_enable;
a2fbb9ea
ET
10331}
10332
10333static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
10334{
10335 struct bnx2x *bp = netdev_priv(dev);
10336
10337 if (capable(CAP_NET_ADMIN))
7995c64e 10338 bp->msg_enable = level;
a2fbb9ea
ET
10339}
10340
10341static int bnx2x_nway_reset(struct net_device *dev)
10342{
10343 struct bnx2x *bp = netdev_priv(dev);
10344
34f80b04
EG
10345 if (!bp->port.pmf)
10346 return 0;
a2fbb9ea 10347
34f80b04 10348 if (netif_running(dev)) {
bb2a0f7a 10349 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
10350 bnx2x_link_set(bp);
10351 }
a2fbb9ea
ET
10352
10353 return 0;
10354}
10355
ab6ad5a4 10356static u32 bnx2x_get_link(struct net_device *dev)
01e53298
NO
10357{
10358 struct bnx2x *bp = netdev_priv(dev);
10359
f34d28ea
EG
10360 if (bp->flags & MF_FUNC_DIS)
10361 return 0;
10362
01e53298
NO
10363 return bp->link_vars.link_up;
10364}
10365
a2fbb9ea
ET
10366static int bnx2x_get_eeprom_len(struct net_device *dev)
10367{
10368 struct bnx2x *bp = netdev_priv(dev);
10369
34f80b04 10370 return bp->common.flash_size;
a2fbb9ea
ET
10371}
10372
10373static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
10374{
34f80b04 10375 int port = BP_PORT(bp);
a2fbb9ea
ET
10376 int count, i;
10377 u32 val = 0;
10378
10379 /* adjust timeout for emulation/FPGA */
10380 count = NVRAM_TIMEOUT_COUNT;
10381 if (CHIP_REV_IS_SLOW(bp))
10382 count *= 100;
10383
10384 /* request access to nvram interface */
10385 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10386 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
10387
10388 for (i = 0; i < count*10; i++) {
10389 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
10390 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
10391 break;
10392
10393 udelay(5);
10394 }
10395
10396 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 10397 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
a2fbb9ea
ET
10398 return -EBUSY;
10399 }
10400
10401 return 0;
10402}
10403
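bnx2x_acquire_nvram_lock() and its release twin below share one shape: write a request, then poll a status register until the wanted bit state shows up or the retry budget runs out. A self-contained sketch of that shape, with a simulated register standing in for REG_RD() on MCP_REG_MCPR_NVM_SW_ARB:

#include <stdint.h>
#include <stdio.h>
#include <errno.h>

/* Simulated register: becomes "ready" after a few reads. */
static unsigned reads;
static uint32_t reg_read(void)
{
	return (++reads >= 3) ? 0x1 : 0x0;
}

/* Poll until (value & mask) == want or the retry budget is exhausted --
 * the same loop shape as the nvram lock/release helpers above. */
static int poll_bit(uint32_t mask, uint32_t want, unsigned tries)
{
	while (tries--) {
		if ((reg_read() & mask) == want)
			return 0;
		/* udelay(5) in the driver; omitted in this sketch */
	}
	return -EBUSY;	/* caller decides how loudly to complain */
}

int main(void)
{
	printf("rc = %d\n", poll_bit(0x1, 0x1, 10));	/* 0: bit came up */
	printf("rc = %d\n", poll_bit(0x2, 0x2, 10));	/* -EBUSY: never set */
	return 0;
}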
10404static int bnx2x_release_nvram_lock(struct bnx2x *bp)
10405{
34f80b04 10406 int port = BP_PORT(bp);
a2fbb9ea
ET
10407 int count, i;
10408 u32 val = 0;
10409
10410 /* adjust timeout for emulation/FPGA */
10411 count = NVRAM_TIMEOUT_COUNT;
10412 if (CHIP_REV_IS_SLOW(bp))
10413 count *= 100;
10414
10415 /* relinquish nvram interface */
10416 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10417 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
10418
10419 for (i = 0; i < count*10; i++) {
10420 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
10421 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
10422 break;
10423
10424 udelay(5);
10425 }
10426
10427 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 10428 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
a2fbb9ea
ET
10429 return -EBUSY;
10430 }
10431
10432 return 0;
10433}
10434
10435static void bnx2x_enable_nvram_access(struct bnx2x *bp)
10436{
10437 u32 val;
10438
10439 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
10440
10441 /* enable both bits, even on read */
10442 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
10443 (val | MCPR_NVM_ACCESS_ENABLE_EN |
10444 MCPR_NVM_ACCESS_ENABLE_WR_EN));
10445}
10446
10447static void bnx2x_disable_nvram_access(struct bnx2x *bp)
10448{
10449 u32 val;
10450
10451 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
10452
10453 /* disable both bits, even after read */
10454 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
10455 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
10456 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
10457}
10458
4781bfad 10459static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
a2fbb9ea
ET
10460 u32 cmd_flags)
10461{
f1410647 10462 int count, i, rc;
a2fbb9ea
ET
10463 u32 val;
10464
10465 /* build the command word */
10466 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
10467
10468 /* need to clear DONE bit separately */
10469 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
10470
10471 /* address of the NVRAM to read from */
10472 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
10473 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
10474
10475 /* issue a read command */
10476 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
10477
10478 /* adjust timeout for emulation/FPGA */
10479 count = NVRAM_TIMEOUT_COUNT;
10480 if (CHIP_REV_IS_SLOW(bp))
10481 count *= 100;
10482
10483 /* wait for completion */
10484 *ret_val = 0;
10485 rc = -EBUSY;
10486 for (i = 0; i < count; i++) {
10487 udelay(5);
10488 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
10489
10490 if (val & MCPR_NVM_COMMAND_DONE) {
10491 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
a2fbb9ea
ET
 10492 /* we read nvram data in cpu order,
 10493 * but ethtool sees it as an array of bytes;
 10494 * converting to big-endian does the job */
4781bfad 10495 *ret_val = cpu_to_be32(val);
a2fbb9ea
ET
10496 rc = 0;
10497 break;
10498 }
10499 }
10500
10501 return rc;
10502}
10503
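The cpu_to_be32() above is what makes the returned buffer look like raw flash bytes: NVRAM stores data big-endian, REG_RD() hands back a host-order u32, and converting to big-endian restores flash byte order on any host. A tiny demonstration, using htonl() in place of cpu_to_be32() and the NVRAM magic value as sample data:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* htonl(): same job as cpu_to_be32() here */

int main(void)
{
	uint32_t host = 0x669955aa;	/* value as the CPU sees it */
	uint32_t wire = htonl(host);	/* byte array in flash order */
	uint8_t bytes[4];

	memcpy(bytes, &wire, 4);
	printf("%02x %02x %02x %02x\n",	/* always: 66 99 55 aa */
	       bytes[0], bytes[1], bytes[2], bytes[3]);
	return 0;
}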
10504static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
10505 int buf_size)
10506{
10507 int rc;
10508 u32 cmd_flags;
4781bfad 10509 __be32 val;
a2fbb9ea
ET
10510
10511 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 10512 DP(BNX2X_MSG_NVM,
c14423fe 10513 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
10514 offset, buf_size);
10515 return -EINVAL;
10516 }
10517
34f80b04
EG
10518 if (offset + buf_size > bp->common.flash_size) {
10519 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 10520 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 10521 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
10522 return -EINVAL;
10523 }
10524
10525 /* request access to nvram interface */
10526 rc = bnx2x_acquire_nvram_lock(bp);
10527 if (rc)
10528 return rc;
10529
10530 /* enable access to nvram interface */
10531 bnx2x_enable_nvram_access(bp);
10532
10533 /* read the first word(s) */
10534 cmd_flags = MCPR_NVM_COMMAND_FIRST;
10535 while ((buf_size > sizeof(u32)) && (rc == 0)) {
10536 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
10537 memcpy(ret_buf, &val, 4);
10538
10539 /* advance to the next dword */
10540 offset += sizeof(u32);
10541 ret_buf += sizeof(u32);
10542 buf_size -= sizeof(u32);
10543 cmd_flags = 0;
10544 }
10545
10546 if (rc == 0) {
10547 cmd_flags |= MCPR_NVM_COMMAND_LAST;
10548 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
10549 memcpy(ret_buf, &val, 4);
10550 }
10551
10552 /* disable access to nvram interface */
10553 bnx2x_disable_nvram_access(bp);
10554 bnx2x_release_nvram_lock(bp);
10555
10556 return rc;
10557}
10558
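The command-flag sequencing in the loop above is easy to trace in isolation: FIRST goes on the opening dword, LAST on the closing one, and a single-dword read carries both. A sketch with illustrative flag values (not the MCPR_NVM_COMMAND_* encodings):

#include <stdio.h>

#define CMD_FIRST 0x1	/* illustrative, not MCPR_NVM_COMMAND_FIRST */
#define CMD_LAST  0x2	/* illustrative, not MCPR_NVM_COMMAND_LAST */

static void trace_read(int buf_size)
{
	unsigned flags = CMD_FIRST;

	while (buf_size > 4) {		/* all but the final dword */
		printf("read dword, flags=0x%x\n", flags);
		buf_size -= 4;
		flags = 0;
	}
	printf("read dword, flags=0x%x\n", flags | CMD_LAST);
}

int main(void)
{
	trace_read(12);	/* 0x1, 0x0, 0x2 */
	trace_read(4);	/* 0x3: a single dword is both FIRST and LAST */
	return 0;
}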
10559static int bnx2x_get_eeprom(struct net_device *dev,
10560 struct ethtool_eeprom *eeprom, u8 *eebuf)
10561{
10562 struct bnx2x *bp = netdev_priv(dev);
10563 int rc;
10564
2add3acb
EG
10565 if (!netif_running(dev))
10566 return -EAGAIN;
10567
34f80b04 10568 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
10569 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
10570 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
10571 eeprom->len, eeprom->len);
10572
10573 /* parameters already validated in ethtool_get_eeprom */
10574
10575 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
10576
10577 return rc;
10578}
10579
10580static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
10581 u32 cmd_flags)
10582{
f1410647 10583 int count, i, rc;
a2fbb9ea
ET
10584
10585 /* build the command word */
10586 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
10587
10588 /* need to clear DONE bit separately */
10589 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
10590
10591 /* write the data */
10592 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
10593
10594 /* address of the NVRAM to write to */
10595 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
10596 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
10597
10598 /* issue the write command */
10599 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
10600
10601 /* adjust timeout for emulation/FPGA */
10602 count = NVRAM_TIMEOUT_COUNT;
10603 if (CHIP_REV_IS_SLOW(bp))
10604 count *= 100;
10605
10606 /* wait for completion */
10607 rc = -EBUSY;
10608 for (i = 0; i < count; i++) {
10609 udelay(5);
10610 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
10611 if (val & MCPR_NVM_COMMAND_DONE) {
10612 rc = 0;
10613 break;
10614 }
10615 }
10616
10617 return rc;
10618}
10619
f1410647 10620#define BYTE_OFFSET(offset) (8 * ((offset) & 0x03))
a2fbb9ea
ET
10621
10622static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
10623 int buf_size)
10624{
10625 int rc;
10626 u32 cmd_flags;
10627 u32 align_offset;
4781bfad 10628 __be32 val;
a2fbb9ea 10629
34f80b04
EG
10630 if (offset + buf_size > bp->common.flash_size) {
10631 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 10632 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 10633 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
10634 return -EINVAL;
10635 }
10636
10637 /* request access to nvram interface */
10638 rc = bnx2x_acquire_nvram_lock(bp);
10639 if (rc)
10640 return rc;
10641
10642 /* enable access to nvram interface */
10643 bnx2x_enable_nvram_access(bp);
10644
10645 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
10646 align_offset = (offset & ~0x03);
10647 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
10648
10649 if (rc == 0) {
10650 val &= ~(0xff << BYTE_OFFSET(offset));
10651 val |= (*data_buf << BYTE_OFFSET(offset));
10652
10653 /* nvram data is returned as an array of bytes
10654 * convert it back to cpu order */
10655 val = be32_to_cpu(val);
10656
a2fbb9ea
ET
10657 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
10658 cmd_flags);
10659 }
10660
10661 /* disable access to nvram interface */
10662 bnx2x_disable_nvram_access(bp);
10663 bnx2x_release_nvram_lock(bp);
10664
10665 return rc;
10666}
10667
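Writing one byte is a read-modify-write on the aligned dword: clear the target byte lane, then OR the new byte in at BYTE_OFFSET's bit position. A standalone check of the splice arithmetic (endianness handling aside), with hypothetical sample values:

#include <stdio.h>
#include <stdint.h>

#define BYTE_OFFSET(offset) (8 * ((offset) & 0x03))

int main(void)
{
	uint32_t offset = 0x102;	/* byte 2 of the dword at 0x100 */
	uint32_t val = 0x11223344;	/* current dword contents */
	uint8_t byte = 0xab;		/* byte to splice in */

	val &= ~(0xffu << BYTE_OFFSET(offset));	/* clear lane: shift = 16 */
	val |= (uint32_t)byte << BYTE_OFFSET(offset);

	printf("0x%08x\n", val);	/* 0x11ab3344 */
	return 0;
}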
10668static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
10669 int buf_size)
10670{
10671 int rc;
10672 u32 cmd_flags;
10673 u32 val;
10674 u32 written_so_far;
10675
34f80b04 10676 if (buf_size == 1) /* ethtool */
a2fbb9ea 10677 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
a2fbb9ea
ET
10678
10679 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 10680 DP(BNX2X_MSG_NVM,
c14423fe 10681 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
10682 offset, buf_size);
10683 return -EINVAL;
10684 }
10685
34f80b04
EG
10686 if (offset + buf_size > bp->common.flash_size) {
10687 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 10688 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 10689 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
10690 return -EINVAL;
10691 }
10692
10693 /* request access to nvram interface */
10694 rc = bnx2x_acquire_nvram_lock(bp);
10695 if (rc)
10696 return rc;
10697
10698 /* enable access to nvram interface */
10699 bnx2x_enable_nvram_access(bp);
10700
10701 written_so_far = 0;
10702 cmd_flags = MCPR_NVM_COMMAND_FIRST;
10703 while ((written_so_far < buf_size) && (rc == 0)) {
10704 if (written_so_far == (buf_size - sizeof(u32)))
10705 cmd_flags |= MCPR_NVM_COMMAND_LAST;
10706 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
10707 cmd_flags |= MCPR_NVM_COMMAND_LAST;
10708 else if ((offset % NVRAM_PAGE_SIZE) == 0)
10709 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
10710
10711 memcpy(&val, data_buf, 4);
a2fbb9ea
ET
10712
10713 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
10714
10715 /* advance to the next dword */
10716 offset += sizeof(u32);
10717 data_buf += sizeof(u32);
10718 written_so_far += sizeof(u32);
10719 cmd_flags = 0;
10720 }
10721
10722 /* disable access to nvram interface */
10723 bnx2x_disable_nvram_access(bp);
10724 bnx2x_release_nvram_lock(bp);
10725
10726 return rc;
10727}
10728
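FIRST and LAST in the loop above are not only about the buffer's ends: NVRAM is programmed in pages, so LAST is also raised on the dword that closes a page and FIRST on the one that opens the next. A sketch that prints which flags each dword would carry; 256 is a stand-in for NVRAM_PAGE_SIZE and the flag values are illustrative:

#include <stdio.h>

#define PAGE_SZ 256	/* stand-in for NVRAM_PAGE_SIZE */

int main(void)
{
	unsigned offset = PAGE_SZ - 8;	/* straddle a page boundary */
	unsigned buf_size = 16, written = 0, flags;

	for (flags = 1 /* FIRST */; written < buf_size;
	     written += 4, offset += 4) {
		if (written == buf_size - 4)
			flags |= 2;	/* LAST: end of buffer */
		else if (((offset + 4) % PAGE_SZ) == 0)
			flags |= 2;	/* LAST: closes a page */
		else if ((offset % PAGE_SZ) == 0)
			flags |= 1;	/* FIRST: opens a page */
		printf("offset %3u flags 0x%x\n", offset, flags);
		flags = 0;
	}
	return 0;	/* prints flags 0x1, 0x2, 0x1, 0x2 */
}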
10729static int bnx2x_set_eeprom(struct net_device *dev,
10730 struct ethtool_eeprom *eeprom, u8 *eebuf)
10731{
10732 struct bnx2x *bp = netdev_priv(dev);
f57a6025
EG
10733 int port = BP_PORT(bp);
10734 int rc = 0;
a2fbb9ea 10735
9f4c9583
EG
10736 if (!netif_running(dev))
10737 return -EAGAIN;
10738
34f80b04 10739 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
10740 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
10741 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
10742 eeprom->len, eeprom->len);
10743
10744 /* parameters already validated in ethtool_set_eeprom */
10745
f57a6025
EG
10746 /* PHY eeprom can be accessed only by the PMF */
10747 if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
10748 !bp->port.pmf)
10749 return -EINVAL;
10750
10751 if (eeprom->magic == 0x50485950) {
10752 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
10753 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 10754
f57a6025
EG
10755 bnx2x_acquire_phy_lock(bp);
10756 rc |= bnx2x_link_reset(&bp->link_params,
10757 &bp->link_vars, 0);
10758 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
10759 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
10760 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
10761 MISC_REGISTERS_GPIO_HIGH, port);
10762 bnx2x_release_phy_lock(bp);
10763 bnx2x_link_report(bp);
10764
10765 } else if (eeprom->magic == 0x50485952) {
10766 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
f34d28ea 10767 if (bp->state == BNX2X_STATE_OPEN) {
4a37fb66 10768 bnx2x_acquire_phy_lock(bp);
f57a6025
EG
10769 rc |= bnx2x_link_reset(&bp->link_params,
10770 &bp->link_vars, 1);
10771
10772 rc |= bnx2x_phy_init(&bp->link_params,
10773 &bp->link_vars);
4a37fb66 10774 bnx2x_release_phy_lock(bp);
f57a6025
EG
10775 bnx2x_calc_fc_adv(bp);
10776 }
10777 } else if (eeprom->magic == 0x53985943) {
10778 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
10779 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
10780 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
10781 u8 ext_phy_addr =
659bc5c4 10782 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
f57a6025
EG
10783
10784 /* DSP Remove Download Mode */
10785 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
10786 MISC_REGISTERS_GPIO_LOW, port);
34f80b04 10787
f57a6025
EG
10788 bnx2x_acquire_phy_lock(bp);
10789
10790 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
10791
10792 /* wait 0.5 sec to allow it to run */
10793 msleep(500);
10794 bnx2x_ext_phy_hw_reset(bp, port);
10795 msleep(500);
10796 bnx2x_release_phy_lock(bp);
10797 }
10798 } else
c18487ee 10799 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
a2fbb9ea
ET
10800
10801 return rc;
10802}
10803
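The eeprom->magic values are four ASCII characters packed big-endian, e.g. 'PHYP' = 0x50485950 and 'PHYR' = 0x50485952; that packing is also why the PMF gate above can cover the whole family with a single 0x50485900..0x504859FF range check. A quick way to verify the packing:

#include <stdio.h>

/* Pack four ASCII characters the way the 'PHYP'/'PHYR' magics are built */
#define FOURCC(a, b, c, d) \
	(((unsigned)(a) << 24) | ((b) << 16) | ((c) << 8) | (d))

int main(void)
{
	printf("0x%08x\n", FOURCC('P', 'H', 'Y', 'P'));	/* 0x50485950 */
	printf("0x%08x\n", FOURCC('P', 'H', 'Y', 'R'));	/* 0x50485952 */
	return 0;
}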
10804static int bnx2x_get_coalesce(struct net_device *dev,
10805 struct ethtool_coalesce *coal)
10806{
10807 struct bnx2x *bp = netdev_priv(dev);
10808
10809 memset(coal, 0, sizeof(struct ethtool_coalesce));
10810
10811 coal->rx_coalesce_usecs = bp->rx_ticks;
10812 coal->tx_coalesce_usecs = bp->tx_ticks;
a2fbb9ea
ET
10813
10814 return 0;
10815}
10816
10817static int bnx2x_set_coalesce(struct net_device *dev,
10818 struct ethtool_coalesce *coal)
10819{
10820 struct bnx2x *bp = netdev_priv(dev);
10821
cdaa7cb8
VZ
10822 bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
10823 if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
10824 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
a2fbb9ea 10825
cdaa7cb8
VZ
10826 bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
10827 if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
10828 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
a2fbb9ea 10829
34f80b04 10830 if (netif_running(dev))
a2fbb9ea
ET
10831 bnx2x_update_coalesce(bp);
10832
10833 return 0;
10834}
10835
10836static void bnx2x_get_ringparam(struct net_device *dev,
10837 struct ethtool_ringparam *ering)
10838{
10839 struct bnx2x *bp = netdev_priv(dev);
10840
10841 ering->rx_max_pending = MAX_RX_AVAIL;
10842 ering->rx_mini_max_pending = 0;
10843 ering->rx_jumbo_max_pending = 0;
10844
10845 ering->rx_pending = bp->rx_ring_size;
10846 ering->rx_mini_pending = 0;
10847 ering->rx_jumbo_pending = 0;
10848
10849 ering->tx_max_pending = MAX_TX_AVAIL;
10850 ering->tx_pending = bp->tx_ring_size;
10851}
10852
10853static int bnx2x_set_ringparam(struct net_device *dev,
10854 struct ethtool_ringparam *ering)
10855{
10856 struct bnx2x *bp = netdev_priv(dev);
34f80b04 10857 int rc = 0;
a2fbb9ea 10858
72fd0718
VZ
10859 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10860 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10861 return -EAGAIN;
10862 }
10863
a2fbb9ea
ET
10864 if ((ering->rx_pending > MAX_RX_AVAIL) ||
10865 (ering->tx_pending > MAX_TX_AVAIL) ||
10866 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
10867 return -EINVAL;
10868
10869 bp->rx_ring_size = ering->rx_pending;
10870 bp->tx_ring_size = ering->tx_pending;
10871
34f80b04
EG
10872 if (netif_running(dev)) {
10873 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10874 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea
ET
10875 }
10876
34f80b04 10877 return rc;
a2fbb9ea
ET
10878}
10879
10880static void bnx2x_get_pauseparam(struct net_device *dev,
10881 struct ethtool_pauseparam *epause)
10882{
10883 struct bnx2x *bp = netdev_priv(dev);
10884
356e2385
EG
10885 epause->autoneg = (bp->link_params.req_flow_ctrl ==
10886 BNX2X_FLOW_CTRL_AUTO) &&
c18487ee
YR
10887 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
10888
c0700f90
DM
10889 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
10890 BNX2X_FLOW_CTRL_RX);
10891 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
10892 BNX2X_FLOW_CTRL_TX);
a2fbb9ea
ET
10893
10894 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
10895 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
10896 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
10897}
10898
10899static int bnx2x_set_pauseparam(struct net_device *dev,
10900 struct ethtool_pauseparam *epause)
10901{
10902 struct bnx2x *bp = netdev_priv(dev);
10903
34f80b04
EG
10904 if (IS_E1HMF(bp))
10905 return 0;
10906
a2fbb9ea
ET
10907 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
10908 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
10909 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
10910
c0700f90 10911 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
a2fbb9ea 10912
f1410647 10913 if (epause->rx_pause)
c0700f90 10914 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
c18487ee 10915
f1410647 10916 if (epause->tx_pause)
c0700f90 10917 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
c18487ee 10918
c0700f90
DM
10919 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
10920 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 10921
c18487ee 10922 if (epause->autoneg) {
34f80b04 10923 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
3196a88a 10924 DP(NETIF_MSG_LINK, "autoneg not supported\n");
c18487ee
YR
10925 return -EINVAL;
10926 }
a2fbb9ea 10927
c18487ee 10928 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
c0700f90 10929 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
c18487ee 10930 }
a2fbb9ea 10931
c18487ee
YR
10932 DP(NETIF_MSG_LINK,
10933 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
34f80b04
EG
10934
10935 if (netif_running(dev)) {
bb2a0f7a 10936 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
10937 bnx2x_link_set(bp);
10938 }
a2fbb9ea
ET
10939
10940 return 0;
10941}
10942
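The function folds the request into a single req_flow_ctrl word: start from AUTO, OR in RX/TX as asked, fall back to NONE if neither direction was requested, and hand control back to AUTO when autoneg is on with an auto line speed. A sketch of that decision flow; the FC_* encodings are illustrative, not the driver's BNX2X_FLOW_CTRL_* values:

#include <stdio.h>

/* Illustrative encodings; the real BNX2X_FLOW_CTRL_* values live in
 * bnx2x.h and need not match these. */
enum { FC_AUTO = 0, FC_RX = 1, FC_TX = 2, FC_NONE = 4 };

static int fold_pause(int rx_pause, int tx_pause, int autoneg, int speed_auto)
{
	int fc = FC_AUTO;

	if (rx_pause)
		fc |= FC_RX;
	if (tx_pause)
		fc |= FC_TX;
	if (fc == FC_AUTO)		/* neither direction requested */
		fc = FC_NONE;
	if (autoneg && speed_auto)	/* let autoneg resolve it instead */
		fc = FC_AUTO;
	return fc;
}

int main(void)
{
	printf("%d\n", fold_pause(1, 1, 0, 0));	/* 3: RX|TX forced */
	printf("%d\n", fold_pause(0, 0, 0, 0));	/* 4: NONE */
	printf("%d\n", fold_pause(1, 0, 1, 1));	/* 0: AUTO wins */
	return 0;
}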
df0f2343
VZ
10943static int bnx2x_set_flags(struct net_device *dev, u32 data)
10944{
10945 struct bnx2x *bp = netdev_priv(dev);
10946 int changed = 0;
10947 int rc = 0;
10948
72fd0718
VZ
10949 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10950 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10951 return -EAGAIN;
10952 }
10953
df0f2343
VZ
10954 /* TPA requires Rx CSUM offloading */
10955 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
d43a7e67
VZ
10956 if (!disable_tpa) {
10957 if (!(dev->features & NETIF_F_LRO)) {
10958 dev->features |= NETIF_F_LRO;
10959 bp->flags |= TPA_ENABLE_FLAG;
10960 changed = 1;
10961 }
10962 } else
10963 rc = -EINVAL;
df0f2343
VZ
10964 } else if (dev->features & NETIF_F_LRO) {
10965 dev->features &= ~NETIF_F_LRO;
10966 bp->flags &= ~TPA_ENABLE_FLAG;
10967 changed = 1;
10968 }
10969
10970 if (changed && netif_running(dev)) {
10971 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10972 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10973 }
10974
10975 return rc;
10976}
10977
a2fbb9ea
ET
10978static u32 bnx2x_get_rx_csum(struct net_device *dev)
10979{
10980 struct bnx2x *bp = netdev_priv(dev);
10981
10982 return bp->rx_csum;
10983}
10984
10985static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
10986{
10987 struct bnx2x *bp = netdev_priv(dev);
df0f2343 10988 int rc = 0;
a2fbb9ea 10989
72fd0718
VZ
10990 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10991 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10992 return -EAGAIN;
10993 }
10994
a2fbb9ea 10995 bp->rx_csum = data;
df0f2343
VZ
10996
 10997 /* Disable TPA when Rx CSUM is disabled. Otherwise all
 10998 TPA'ed packets would be discarded due to a wrong TCP CSUM */
10999 if (!data) {
11000 u32 flags = ethtool_op_get_flags(dev);
11001
11002 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
11003 }
11004
11005 return rc;
a2fbb9ea
ET
11006}
11007
11008static int bnx2x_set_tso(struct net_device *dev, u32 data)
11009{
755735eb 11010 if (data) {
a2fbb9ea 11011 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
11012 dev->features |= NETIF_F_TSO6;
11013 } else {
a2fbb9ea 11014 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
11015 dev->features &= ~NETIF_F_TSO6;
11016 }
11017
a2fbb9ea
ET
11018 return 0;
11019}
11020
f3c87cdd 11021static const struct {
a2fbb9ea
ET
11022 char string[ETH_GSTRING_LEN];
11023} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
f3c87cdd
YG
11024 { "register_test (offline)" },
11025 { "memory_test (offline)" },
11026 { "loopback_test (offline)" },
11027 { "nvram_test (online)" },
11028 { "interrupt_test (online)" },
11029 { "link_test (online)" },
d3d4f495 11030 { "idle check (online)" }
a2fbb9ea
ET
11031};
11032
f3c87cdd
YG
11033static int bnx2x_test_registers(struct bnx2x *bp)
11034{
11035 int idx, i, rc = -ENODEV;
11036 u32 wr_val = 0;
9dabc424 11037 int port = BP_PORT(bp);
f3c87cdd 11038 static const struct {
cdaa7cb8
VZ
11039 u32 offset0;
11040 u32 offset1;
11041 u32 mask;
f3c87cdd
YG
11042 } reg_tbl[] = {
11043/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
11044 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
11045 { HC_REG_AGG_INT_0, 4, 0x000003ff },
11046 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
11047 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
11048 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
11049 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
11050 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
11051 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
11052 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
11053/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
11054 { QM_REG_CONNNUM_0, 4, 0x000fffff },
11055 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
11056 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
11057 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
11058 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
11059 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
11060 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
f3c87cdd 11061 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
c1f1a06f
EG
11062 { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
11063/* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
f3c87cdd
YG
11064 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
11065 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
11066 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
11067 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
11068 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
11069 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
11070 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
11071 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
c1f1a06f
EG
11072 { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
11073/* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
f3c87cdd
YG
11074 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
11075 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
11076 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
11077 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
11078 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
11079 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
11080
11081 { 0xffffffff, 0, 0x00000000 }
11082 };
11083
11084 if (!netif_running(bp->dev))
11085 return rc;
11086
 11087 /* Run the test twice:
 11088 first writing 0x00000000, then writing 0xffffffff */
11089 for (idx = 0; idx < 2; idx++) {
11090
11091 switch (idx) {
11092 case 0:
11093 wr_val = 0;
11094 break;
11095 case 1:
11096 wr_val = 0xffffffff;
11097 break;
11098 }
11099
11100 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
11101 u32 offset, mask, save_val, val;
f3c87cdd
YG
11102
11103 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
11104 mask = reg_tbl[i].mask;
11105
11106 save_val = REG_RD(bp, offset);
11107
11108 REG_WR(bp, offset, wr_val);
11109 val = REG_RD(bp, offset);
11110
11111 /* Restore the original register's value */
11112 REG_WR(bp, offset, save_val);
11113
cdaa7cb8
VZ
11114 /* verify value is as expected */
11115 if ((val & mask) != (wr_val & mask)) {
11116 DP(NETIF_MSG_PROBE,
11117 "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
11118 offset, val, wr_val, mask);
f3c87cdd 11119 goto test_reg_exit;
cdaa7cb8 11120 }
f3c87cdd
YG
11121 }
11122 }
11123
11124 rc = 0;
11125
11126test_reg_exit:
11127 return rc;
11128}
11129
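Each reg_tbl row is exercised with the classic save/write/read/compare/restore sequence, once with all-zeros and once with all-ones, compared under the row's mask of implemented bits. A self-contained sketch against a fake register file (the 10-bit register width here is arbitrary):

#include <stdio.h>
#include <stdint.h>

static uint32_t fake_regs[4];	/* stand-in register file */
static uint32_t rd(unsigned a) { return fake_regs[a]; }
static void     wr(unsigned a, uint32_t v) { fake_regs[a] = v & 0x3ff; }
					/* hw keeps only 10 bits */

/* save/write/read/compare/restore, as in the loop above */
static int test_reg(unsigned addr, uint32_t mask, uint32_t wr_val)
{
	uint32_t save = rd(addr), val;

	wr(addr, wr_val);
	val = rd(addr);
	wr(addr, save);			/* restore before judging */

	return ((val & mask) == (wr_val & mask)) ? 0 : -1;
}

int main(void)
{
	/* mask matches the implemented bits: both patterns pass */
	printf("%d %d\n", test_reg(0, 0x3ff, 0x0), test_reg(0, 0x3ff, ~0u));
	/* mask claims more bits than exist: the all-ones pass fails */
	printf("%d %d\n", test_reg(1, 0xfff, 0x0), test_reg(1, 0xfff, ~0u));
	return 0;
}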
11130static int bnx2x_test_memory(struct bnx2x *bp)
11131{
11132 int i, j, rc = -ENODEV;
11133 u32 val;
11134 static const struct {
11135 u32 offset;
11136 int size;
11137 } mem_tbl[] = {
11138 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
11139 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
11140 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
11141 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
11142 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
11143 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
11144 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
11145
11146 { 0xffffffff, 0 }
11147 };
11148 static const struct {
11149 char *name;
11150 u32 offset;
9dabc424
YG
11151 u32 e1_mask;
11152 u32 e1h_mask;
f3c87cdd 11153 } prty_tbl[] = {
9dabc424
YG
11154 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
11155 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
11156 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
11157 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
11158 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
11159 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
11160
11161 { NULL, 0xffffffff, 0, 0 }
f3c87cdd
YG
11162 };
11163
11164 if (!netif_running(bp->dev))
11165 return rc;
11166
11167 /* Go through all the memories */
11168 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
11169 for (j = 0; j < mem_tbl[i].size; j++)
11170 REG_RD(bp, mem_tbl[i].offset + j*4);
11171
11172 /* Check the parity status */
11173 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
11174 val = REG_RD(bp, prty_tbl[i].offset);
9dabc424
YG
11175 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
11176 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
f3c87cdd
YG
11177 DP(NETIF_MSG_HW,
11178 "%s is 0x%x\n", prty_tbl[i].name, val);
11179 goto test_mem_exit;
11180 }
11181 }
11182
11183 rc = 0;
11184
11185test_mem_exit:
11186 return rc;
11187}
11188
f3c87cdd
YG
11189static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
11190{
11191 int cnt = 1000;
11192
11193 if (link_up)
11194 while (bnx2x_link_test(bp) && cnt--)
11195 msleep(10);
11196}
11197
11198static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
11199{
11200 unsigned int pkt_size, num_pkts, i;
11201 struct sk_buff *skb;
11202 unsigned char *packet;
ca00392c 11203 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
54b9ddaa 11204 struct bnx2x_fastpath *fp_tx = &bp->fp[0];
f3c87cdd
YG
11205 u16 tx_start_idx, tx_idx;
11206 u16 rx_start_idx, rx_idx;
ca00392c 11207 u16 pkt_prod, bd_prod;
f3c87cdd 11208 struct sw_tx_bd *tx_buf;
ca00392c
EG
11209 struct eth_tx_start_bd *tx_start_bd;
11210 struct eth_tx_parse_bd *pbd = NULL;
f3c87cdd
YG
11211 dma_addr_t mapping;
11212 union eth_rx_cqe *cqe;
11213 u8 cqe_fp_flags;
11214 struct sw_rx_bd *rx_buf;
11215 u16 len;
11216 int rc = -ENODEV;
11217
b5bf9068
EG
11218 /* check the loopback mode */
11219 switch (loopback_mode) {
11220 case BNX2X_PHY_LOOPBACK:
11221 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
11222 return -EINVAL;
11223 break;
11224 case BNX2X_MAC_LOOPBACK:
f3c87cdd 11225 bp->link_params.loopback_mode = LOOPBACK_BMAC;
f3c87cdd 11226 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068
EG
11227 break;
11228 default:
f3c87cdd 11229 return -EINVAL;
b5bf9068 11230 }
f3c87cdd 11231
b5bf9068
EG
11232 /* prepare the loopback packet */
11233 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
11234 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
f3c87cdd
YG
11235 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
11236 if (!skb) {
11237 rc = -ENOMEM;
11238 goto test_loopback_exit;
11239 }
11240 packet = skb_put(skb, pkt_size);
11241 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
ca00392c
EG
11242 memset(packet + ETH_ALEN, 0, ETH_ALEN);
11243 memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
f3c87cdd
YG
11244 for (i = ETH_HLEN; i < pkt_size; i++)
11245 packet[i] = (unsigned char) (i & 0xff);
11246
b5bf9068 11247 /* send the loopback packet */
f3c87cdd 11248 num_pkts = 0;
ca00392c
EG
11249 tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
11250 rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
f3c87cdd 11251
ca00392c
EG
11252 pkt_prod = fp_tx->tx_pkt_prod++;
11253 tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
11254 tx_buf->first_bd = fp_tx->tx_bd_prod;
f3c87cdd 11255 tx_buf->skb = skb;
ca00392c 11256 tx_buf->flags = 0;
f3c87cdd 11257
ca00392c
EG
11258 bd_prod = TX_BD(fp_tx->tx_bd_prod);
11259 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
1a983142
FT
11260 mapping = dma_map_single(&bp->pdev->dev, skb->data,
11261 skb_headlen(skb), DMA_TO_DEVICE);
ca00392c
EG
11262 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11263 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11264 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
11265 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11266 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
11267 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11268 tx_start_bd->general_data = ((UNICAST_ADDRESS <<
11269 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
11270
11271 /* turn on parsing and get a BD */
11272 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11273 pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
11274
11275 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
f3c87cdd 11276
58f4c4cf
EG
11277 wmb();
11278
ca00392c
EG
11279 fp_tx->tx_db.data.prod += 2;
11280 barrier();
54b9ddaa 11281 DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
f3c87cdd
YG
11282
11283 mmiowb();
11284
11285 num_pkts++;
ca00392c 11286 fp_tx->tx_bd_prod += 2; /* start + pbd */
f3c87cdd
YG
11287
11288 udelay(100);
11289
ca00392c 11290 tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
f3c87cdd
YG
11291 if (tx_idx != tx_start_idx + num_pkts)
11292 goto test_loopback_exit;
11293
ca00392c 11294 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
f3c87cdd
YG
11295 if (rx_idx != rx_start_idx + num_pkts)
11296 goto test_loopback_exit;
11297
ca00392c 11298 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
f3c87cdd
YG
11299 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
11300 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
11301 goto test_loopback_rx_exit;
11302
11303 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
11304 if (len != pkt_size)
11305 goto test_loopback_rx_exit;
11306
ca00392c 11307 rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
f3c87cdd
YG
11308 skb = rx_buf->skb;
11309 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
11310 for (i = ETH_HLEN; i < pkt_size; i++)
11311 if (*(skb->data + i) != (unsigned char) (i & 0xff))
11312 goto test_loopback_rx_exit;
11313
11314 rc = 0;
11315
11316test_loopback_rx_exit:
f3c87cdd 11317
ca00392c
EG
11318 fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
11319 fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
11320 fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
11321 fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
f3c87cdd
YG
11322
11323 /* Update producers */
ca00392c
EG
11324 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
11325 fp_rx->rx_sge_prod);
f3c87cdd
YG
11326
11327test_loopback_exit:
11328 bp->link_params.loopback_mode = LOOPBACK_NONE;
11329
11330 return rc;
11331}
11332
11333static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
11334{
b5bf9068 11335 int rc = 0, res;
f3c87cdd 11336
2145a920
VZ
11337 if (BP_NOMCP(bp))
11338 return rc;
11339
f3c87cdd
YG
11340 if (!netif_running(bp->dev))
11341 return BNX2X_LOOPBACK_FAILED;
11342
f8ef6e44 11343 bnx2x_netif_stop(bp, 1);
3910c8ae 11344 bnx2x_acquire_phy_lock(bp);
f3c87cdd 11345
b5bf9068
EG
11346 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
11347 if (res) {
11348 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
11349 rc |= BNX2X_PHY_LOOPBACK_FAILED;
f3c87cdd
YG
11350 }
11351
b5bf9068
EG
11352 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
11353 if (res) {
11354 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
11355 rc |= BNX2X_MAC_LOOPBACK_FAILED;
f3c87cdd
YG
11356 }
11357
3910c8ae 11358 bnx2x_release_phy_lock(bp);
f3c87cdd
YG
11359 bnx2x_netif_start(bp);
11360
11361 return rc;
11362}
11363
11364#define CRC32_RESIDUAL 0xdebb20e3
11365
11366static int bnx2x_test_nvram(struct bnx2x *bp)
11367{
11368 static const struct {
11369 int offset;
11370 int size;
11371 } nvram_tbl[] = {
11372 { 0, 0x14 }, /* bootstrap */
11373 { 0x14, 0xec }, /* dir */
11374 { 0x100, 0x350 }, /* manuf_info */
11375 { 0x450, 0xf0 }, /* feature_info */
11376 { 0x640, 0x64 }, /* upgrade_key_info */
11377 { 0x6a4, 0x64 },
11378 { 0x708, 0x70 }, /* manuf_key_info */
11379 { 0x778, 0x70 },
11380 { 0, 0 }
11381 };
4781bfad 11382 __be32 buf[0x350 / 4];
f3c87cdd
YG
11383 u8 *data = (u8 *)buf;
11384 int i, rc;
ab6ad5a4 11385 u32 magic, crc;
f3c87cdd 11386
2145a920
VZ
11387 if (BP_NOMCP(bp))
11388 return 0;
11389
f3c87cdd
YG
11390 rc = bnx2x_nvram_read(bp, 0, data, 4);
11391 if (rc) {
f5372251 11392 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
f3c87cdd
YG
11393 goto test_nvram_exit;
11394 }
11395
11396 magic = be32_to_cpu(buf[0]);
11397 if (magic != 0x669955aa) {
11398 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
11399 rc = -ENODEV;
11400 goto test_nvram_exit;
11401 }
11402
11403 for (i = 0; nvram_tbl[i].size; i++) {
11404
11405 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
11406 nvram_tbl[i].size);
11407 if (rc) {
11408 DP(NETIF_MSG_PROBE,
f5372251 11409 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
f3c87cdd
YG
11410 goto test_nvram_exit;
11411 }
11412
ab6ad5a4
EG
11413 crc = ether_crc_le(nvram_tbl[i].size, data);
11414 if (crc != CRC32_RESIDUAL) {
f3c87cdd 11415 DP(NETIF_MSG_PROBE,
ab6ad5a4 11416 "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
f3c87cdd
YG
11417 rc = -ENODEV;
11418 goto test_nvram_exit;
11419 }
11420 }
11421
11422test_nvram_exit:
11423 return rc;
11424}
11425
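CRC32_RESIDUAL is the standard CRC-32 residue: when a block ends with the little-endian complement-CRC of the bytes before it, running the reflected CRC (init 0xffffffff, no final xor, i.e. what ether_crc_le() computes) over the whole block lands on 0xdebb20e3 regardless of the data. A self-contained demonstration; the sample block contents are arbitrary:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Reflected CRC-32 (poly 0xedb88320), init passed in, no final xor --
 * the same arithmetic as the kernel's ether_crc_le(). */
static uint32_t crc32_le(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320u : 0);
	}
	return crc;
}

int main(void)
{
	uint8_t blk[16] = "nvram test data";	/* 12 data bytes + 4 for crc */
	uint32_t crc = ~crc32_le(~0u, blk, 12);	/* checksum of the data */

	/* append the complement-CRC little-endian, as the nvram blocks do;
	 * this memcpy assumes a little-endian host */
	memcpy(blk + 12, &crc, 4);

	printf("0x%08x\n", crc32_le(~0u, blk, 16));	/* 0xdebb20e3 */
	return 0;
}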
11426static int bnx2x_test_intr(struct bnx2x *bp)
11427{
11428 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
11429 int i, rc;
11430
11431 if (!netif_running(bp->dev))
11432 return -ENODEV;
11433
8d9c5f34 11434 config->hdr.length = 0;
af246401 11435 if (CHIP_IS_E1(bp))
0c43f43f
VZ
11436 /* use last unicast entries */
11437 config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
af246401
EG
11438 else
11439 config->hdr.offset = BP_FUNC(bp);
0626b899 11440 config->hdr.client_id = bp->fp->cl_id;
f3c87cdd
YG
11441 config->hdr.reserved1 = 0;
11442
e665bfda
MC
11443 bp->set_mac_pending++;
11444 smp_wmb();
f3c87cdd
YG
11445 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11446 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
11447 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
11448 if (rc == 0) {
f3c87cdd
YG
11449 for (i = 0; i < 10; i++) {
11450 if (!bp->set_mac_pending)
11451 break;
e665bfda 11452 smp_rmb();
f3c87cdd
YG
11453 msleep_interruptible(10);
11454 }
11455 if (i == 10)
11456 rc = -ENODEV;
11457 }
11458
11459 return rc;
11460}
11461
a2fbb9ea
ET
11462static void bnx2x_self_test(struct net_device *dev,
11463 struct ethtool_test *etest, u64 *buf)
11464{
11465 struct bnx2x *bp = netdev_priv(dev);
a2fbb9ea 11466
72fd0718
VZ
11467 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11468 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11469 etest->flags |= ETH_TEST_FL_FAILED;
11470 return;
11471 }
11472
a2fbb9ea
ET
11473 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
11474
f3c87cdd 11475 if (!netif_running(dev))
a2fbb9ea 11476 return;
a2fbb9ea 11477
33471629 11478 /* offline tests are not supported in MF mode */
f3c87cdd
YG
11479 if (IS_E1HMF(bp))
11480 etest->flags &= ~ETH_TEST_FL_OFFLINE;
11481
11482 if (etest->flags & ETH_TEST_FL_OFFLINE) {
279abdf5
EG
11483 int port = BP_PORT(bp);
11484 u32 val;
f3c87cdd
YG
11485 u8 link_up;
11486
279abdf5
EG
11487 /* save current value of input enable for TX port IF */
11488 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
11489 /* disable input for TX port IF */
11490 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
11491
061bc702 11492 link_up = (bnx2x_link_test(bp) == 0);
f3c87cdd
YG
11493 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11494 bnx2x_nic_load(bp, LOAD_DIAG);
11495 /* wait until link state is restored */
11496 bnx2x_wait_for_link(bp, link_up);
11497
11498 if (bnx2x_test_registers(bp) != 0) {
11499 buf[0] = 1;
11500 etest->flags |= ETH_TEST_FL_FAILED;
11501 }
11502 if (bnx2x_test_memory(bp) != 0) {
11503 buf[1] = 1;
11504 etest->flags |= ETH_TEST_FL_FAILED;
11505 }
11506 buf[2] = bnx2x_test_loopback(bp, link_up);
11507 if (buf[2] != 0)
11508 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 11509
f3c87cdd 11510 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
279abdf5
EG
11511
11512 /* restore input for TX port IF */
11513 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
11514
f3c87cdd
YG
11515 bnx2x_nic_load(bp, LOAD_NORMAL);
11516 /* wait until link state is restored */
11517 bnx2x_wait_for_link(bp, link_up);
11518 }
11519 if (bnx2x_test_nvram(bp) != 0) {
11520 buf[3] = 1;
a2fbb9ea
ET
11521 etest->flags |= ETH_TEST_FL_FAILED;
11522 }
f3c87cdd
YG
11523 if (bnx2x_test_intr(bp) != 0) {
11524 buf[4] = 1;
11525 etest->flags |= ETH_TEST_FL_FAILED;
11526 }
11527 if (bp->port.pmf)
11528 if (bnx2x_link_test(bp) != 0) {
11529 buf[5] = 1;
11530 etest->flags |= ETH_TEST_FL_FAILED;
11531 }
f3c87cdd
YG
11532
11533#ifdef BNX2X_EXTRA_DEBUG
11534 bnx2x_panic_dump(bp);
11535#endif
a2fbb9ea
ET
11536}
11537
de832a55
EG
11538static const struct {
11539 long offset;
11540 int size;
11541 u8 string[ETH_GSTRING_LEN];
11542} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
11543/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
11544 { Q_STATS_OFFSET32(error_bytes_received_hi),
11545 8, "[%d]: rx_error_bytes" },
11546 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
11547 8, "[%d]: rx_ucast_packets" },
11548 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
11549 8, "[%d]: rx_mcast_packets" },
11550 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
11551 8, "[%d]: rx_bcast_packets" },
11552 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
11553 { Q_STATS_OFFSET32(rx_err_discard_pkt),
11554 4, "[%d]: rx_phy_ip_err_discards"},
11555 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
11556 4, "[%d]: rx_skb_alloc_discard" },
11557 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
11558
11559/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
11560 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
11561 8, "[%d]: tx_packets" }
11562};
11563
bb2a0f7a
YG
11564static const struct {
11565 long offset;
11566 int size;
11567 u32 flags;
66e855f3
YG
11568#define STATS_FLAGS_PORT 1
11569#define STATS_FLAGS_FUNC 2
de832a55 11570#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
66e855f3 11571 u8 string[ETH_GSTRING_LEN];
bb2a0f7a 11572} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
de832a55
EG
11573/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
11574 8, STATS_FLAGS_BOTH, "rx_bytes" },
66e855f3 11575 { STATS_OFFSET32(error_bytes_received_hi),
de832a55 11576 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
bb2a0f7a 11577 { STATS_OFFSET32(total_unicast_packets_received_hi),
de832a55 11578 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
bb2a0f7a 11579 { STATS_OFFSET32(total_multicast_packets_received_hi),
de832a55 11580 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
bb2a0f7a 11581 { STATS_OFFSET32(total_broadcast_packets_received_hi),
de832a55 11582 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
bb2a0f7a 11583 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
66e855f3 11584 8, STATS_FLAGS_PORT, "rx_crc_errors" },
bb2a0f7a 11585 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
66e855f3 11586 8, STATS_FLAGS_PORT, "rx_align_errors" },
de832a55
EG
11587 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
11588 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
11589 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
11590 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
11591/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
11592 8, STATS_FLAGS_PORT, "rx_fragments" },
11593 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
11594 8, STATS_FLAGS_PORT, "rx_jabbers" },
11595 { STATS_OFFSET32(no_buff_discard_hi),
11596 8, STATS_FLAGS_BOTH, "rx_discards" },
11597 { STATS_OFFSET32(mac_filter_discard),
11598 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
11599 { STATS_OFFSET32(xxoverflow_discard),
11600 4, STATS_FLAGS_PORT, "rx_fw_discards" },
11601 { STATS_OFFSET32(brb_drop_hi),
11602 8, STATS_FLAGS_PORT, "rx_brb_discard" },
11603 { STATS_OFFSET32(brb_truncate_hi),
11604 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
11605 { STATS_OFFSET32(pause_frames_received_hi),
11606 8, STATS_FLAGS_PORT, "rx_pause_frames" },
11607 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
11608 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
11609 { STATS_OFFSET32(nig_timer_max),
11610 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
11611/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
11612 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
11613 { STATS_OFFSET32(rx_skb_alloc_failed),
11614 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
11615 { STATS_OFFSET32(hw_csum_err),
11616 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
11617
11618 { STATS_OFFSET32(total_bytes_transmitted_hi),
11619 8, STATS_FLAGS_BOTH, "tx_bytes" },
11620 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
11621 8, STATS_FLAGS_PORT, "tx_error_bytes" },
11622 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
11623 8, STATS_FLAGS_BOTH, "tx_packets" },
11624 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
11625 8, STATS_FLAGS_PORT, "tx_mac_errors" },
11626 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
11627 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
bb2a0f7a 11628 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
66e855f3 11629 8, STATS_FLAGS_PORT, "tx_single_collisions" },
bb2a0f7a 11630 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
66e855f3 11631 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
de832a55 11632/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
66e855f3 11633 8, STATS_FLAGS_PORT, "tx_deferred" },
bb2a0f7a 11634 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
66e855f3 11635 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
bb2a0f7a 11636 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
66e855f3 11637 8, STATS_FLAGS_PORT, "tx_late_collisions" },
bb2a0f7a 11638 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
66e855f3 11639 8, STATS_FLAGS_PORT, "tx_total_collisions" },
bb2a0f7a 11640 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
66e855f3 11641 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
bb2a0f7a 11642 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
66e855f3 11643 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
bb2a0f7a 11644 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
66e855f3 11645 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
bb2a0f7a 11646 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
66e855f3 11647 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
bb2a0f7a 11648 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
66e855f3 11649 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
bb2a0f7a 11650 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
66e855f3 11651 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
de832a55 11652/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
66e855f3 11653 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
de832a55
EG
11654 { STATS_OFFSET32(pause_frames_sent_hi),
11655 8, STATS_FLAGS_PORT, "tx_pause_frames" }
a2fbb9ea
ET
11656};
11657
de832a55
EG
11658#define IS_PORT_STAT(i) \
11659 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
11660#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
11661#define IS_E1HMF_MODE_STAT(bp) \
7995c64e 11662 (IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
66e855f3 11663
15f0a394
BH
11664static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
11665{
11666 struct bnx2x *bp = netdev_priv(dev);
11667 int i, num_stats;
11668
cdaa7cb8 11669 switch (stringset) {
15f0a394
BH
11670 case ETH_SS_STATS:
11671 if (is_multi(bp)) {
54b9ddaa 11672 num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
15f0a394
BH
11673 if (!IS_E1HMF_MODE_STAT(bp))
11674 num_stats += BNX2X_NUM_STATS;
11675 } else {
11676 if (IS_E1HMF_MODE_STAT(bp)) {
11677 num_stats = 0;
11678 for (i = 0; i < BNX2X_NUM_STATS; i++)
11679 if (IS_FUNC_STAT(i))
11680 num_stats++;
11681 } else
11682 num_stats = BNX2X_NUM_STATS;
11683 }
11684 return num_stats;
11685
11686 case ETH_SS_TEST:
11687 return BNX2X_NUM_TESTS;
11688
11689 default:
11690 return -EINVAL;
11691 }
11692}
11693
a2fbb9ea
ET
11694static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11695{
bb2a0f7a 11696 struct bnx2x *bp = netdev_priv(dev);
de832a55 11697 int i, j, k;
bb2a0f7a 11698
a2fbb9ea
ET
11699 switch (stringset) {
11700 case ETH_SS_STATS:
de832a55
EG
11701 if (is_multi(bp)) {
11702 k = 0;
54b9ddaa 11703 for_each_queue(bp, i) {
de832a55
EG
11704 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
11705 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
11706 bnx2x_q_stats_arr[j].string, i);
11707 k += BNX2X_NUM_Q_STATS;
11708 }
11709 if (IS_E1HMF_MODE_STAT(bp))
11710 break;
11711 for (j = 0; j < BNX2X_NUM_STATS; j++)
11712 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
11713 bnx2x_stats_arr[j].string);
11714 } else {
11715 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
11716 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
11717 continue;
11718 strcpy(buf + j*ETH_GSTRING_LEN,
11719 bnx2x_stats_arr[i].string);
11720 j++;
11721 }
bb2a0f7a 11722 }
a2fbb9ea
ET
11723 break;
11724
11725 case ETH_SS_TEST:
11726 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
11727 break;
11728 }
11729}
11730
a2fbb9ea
ET
11731static void bnx2x_get_ethtool_stats(struct net_device *dev,
11732 struct ethtool_stats *stats, u64 *buf)
11733{
11734 struct bnx2x *bp = netdev_priv(dev);
de832a55
EG
11735 u32 *hw_stats, *offset;
11736 int i, j, k;
bb2a0f7a 11737
de832a55
EG
11738 if (is_multi(bp)) {
11739 k = 0;
54b9ddaa 11740 for_each_queue(bp, i) {
de832a55
EG
11741 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
11742 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
11743 if (bnx2x_q_stats_arr[j].size == 0) {
11744 /* skip this counter */
11745 buf[k + j] = 0;
11746 continue;
11747 }
11748 offset = (hw_stats +
11749 bnx2x_q_stats_arr[j].offset);
11750 if (bnx2x_q_stats_arr[j].size == 4) {
11751 /* 4-byte counter */
11752 buf[k + j] = (u64) *offset;
11753 continue;
11754 }
11755 /* 8-byte counter */
11756 buf[k + j] = HILO_U64(*offset, *(offset + 1));
11757 }
11758 k += BNX2X_NUM_Q_STATS;
11759 }
11760 if (IS_E1HMF_MODE_STAT(bp))
11761 return;
11762 hw_stats = (u32 *)&bp->eth_stats;
11763 for (j = 0; j < BNX2X_NUM_STATS; j++) {
11764 if (bnx2x_stats_arr[j].size == 0) {
11765 /* skip this counter */
11766 buf[k + j] = 0;
11767 continue;
11768 }
11769 offset = (hw_stats + bnx2x_stats_arr[j].offset);
11770 if (bnx2x_stats_arr[j].size == 4) {
11771 /* 4-byte counter */
11772 buf[k + j] = (u64) *offset;
11773 continue;
11774 }
11775 /* 8-byte counter */
11776 buf[k + j] = HILO_U64(*offset, *(offset + 1));
a2fbb9ea 11777 }
de832a55
EG
11778 } else {
11779 hw_stats = (u32 *)&bp->eth_stats;
11780 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
11781 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
11782 continue;
11783 if (bnx2x_stats_arr[i].size == 0) {
11784 /* skip this counter */
11785 buf[j] = 0;
11786 j++;
11787 continue;
11788 }
11789 offset = (hw_stats + bnx2x_stats_arr[i].offset);
11790 if (bnx2x_stats_arr[i].size == 4) {
11791 /* 4-byte counter */
11792 buf[j] = (u64) *offset;
11793 j++;
11794 continue;
11795 }
11796 /* 8-byte counter */
11797 buf[j] = HILO_U64(*offset, *(offset + 1));
bb2a0f7a 11798 j++;
a2fbb9ea 11799 }
a2fbb9ea
ET
11800 }
11801}
11802
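The statistics blocks keep 64-bit counters as two adjacent 32-bit words, hi first, and HILO_U64 (defined elsewhere in the driver) splices them back together. A sketch under the assumption that the macro has its conventional shift-and-add shape:

#include <stdio.h>
#include <stdint.h>

/* Assumed shape of the driver's HILO_U64: hi word first, then lo */
#define HILO_U64(hi, lo) (((uint64_t)(hi) << 32) + (lo))

int main(void)
{
	/* a stats block laid out as ..._hi, ..._lo pairs of u32 */
	uint32_t stats[2] = { 0x00000001, 0x23456789 };
	uint32_t *offset = stats;

	printf("%llu\n",	/* 0x123456789 = 4886718345 */
	       (unsigned long long)HILO_U64(*offset, *(offset + 1)));
	return 0;
}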
11803static int bnx2x_phys_id(struct net_device *dev, u32 data)
11804{
11805 struct bnx2x *bp = netdev_priv(dev);
11806 int i;
11807
34f80b04
EG
11808 if (!netif_running(dev))
11809 return 0;
11810
11811 if (!bp->port.pmf)
11812 return 0;
11813
a2fbb9ea
ET
11814 if (data == 0)
11815 data = 2;
11816
11817 for (i = 0; i < (data * 2); i++) {
c18487ee 11818 if ((i % 2) == 0)
7846e471
YR
11819 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
11820 SPEED_1000);
c18487ee 11821 else
7846e471 11822 bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
c18487ee 11823
a2fbb9ea
ET
11824 msleep_interruptible(500);
11825 if (signal_pending(current))
11826 break;
11827 }
11828
c18487ee 11829 if (bp->link_vars.link_up)
7846e471
YR
11830 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
11831 bp->link_vars.line_speed);
a2fbb9ea
ET
11832
11833 return 0;
11834}

static const struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_regs_len		= bnx2x_get_regs_len,
	.get_regs		= bnx2x_get_regs,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= bnx2x_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test		= bnx2x_self_test,
	.get_sset_count		= bnx2x_get_sset_count,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */

/****************************************************************************
* General service functions
****************************************************************************/

static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
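
/*
 * Editorial note: the low two bits of PMCSR encode the device power state
 * (0 = D0, 1 = D1, 2 = D2, 3 = D3hot), which is why the D3hot branch above
 * does "pmcsr |= 3". A hypothetical decode helper, for illustration only:
 */
#if 0
static pci_power_t example_pmcsr_to_state(u16 pmcsr)
{
	switch (pmcsr & PCI_PM_CTRL_STATE_MASK) {
	case 0:
		return PCI_D0;
	case 1:
		return PCI_D1;
	case 2:
		return PCI_D2;
	default:
		return PCI_D3hot;
	}
}
#endif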

static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}
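
/*
 * Editorial note on the increment above: the last entry of each RCQ page is
 * a next-page pointer rather than a completion, so a consumer index that
 * masks to MAX_RCQ_DESC_CNT is sitting on such an entry and must be stepped
 * over. As a worked example, if MAX_RCQ_DESC_CNT were 127, a status-block
 * value of 127 would satisfy the check and advance to 128, the first real
 * entry of the next page.
 */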

/*
 * net_device service functions
 */

static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;

	while (1) {
#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic)) {
			napi_complete(napi);
			return 0;
		}
#endif

		if (bnx2x_has_tx_work(fp))
			bnx2x_tx_int(fp);

		if (bnx2x_has_rx_work(fp)) {
			work_done += bnx2x_rx_int(fp, budget - work_done);

			/* must not complete if we consumed full budget */
			if (work_done >= budget)
				break;
		}

		/* Fall out from the NAPI loop if needed */
		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
			bnx2x_update_fpsb_idx(fp);
			/* bnx2x_has_rx_work() reads the status block, so we
			 * need to ensure that the status block indices have
			 * actually been read (bnx2x_update_fpsb_idx) before
			 * this check (bnx2x_has_rx_work); otherwise we could
			 * write a stale ("newer") value of the status block
			 * to the IGU.  If a DMA happened right after
			 * bnx2x_has_rx_work and there were no rmb, the read
			 * in bnx2x_update_fpsb_idx could be postponed to
			 * just before bnx2x_ack_sb, in which case no further
			 * interrupt would arrive until the next status block
			 * update, even though work is still pending.
			 */
			rmb();

			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
				napi_complete(napi);
				/* Re-enable interrupts */
				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
					     le16_to_cpu(fp->fp_c_idx),
					     IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_u_idx),
					     IGU_INT_ENABLE, 1);
				break;
			}
		}
	}

	return work_done;
}


/* we split the first BD into header and data BDs
 * to ease the pain of our fellow microcode engineers;
 * we use one mapping for both BDs.
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}

static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
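
/*
 * Editorial sketch, not built: what bnx2x_csum_fix() compensates for. The
 * stack computed the partial checksum starting at skb->csum_start, but the
 * parsing BD wants it taken from the transport header; "fix" is the signed
 * byte distance between the two, so the sum of the surplus (or missing)
 * bytes is removed (or added) with the usual one's-complement helpers.
 */
#if 0
static u16 example_strip_8_leading_bytes(unsigned char *t_header, u16 csum)
{
	/* behaves like bnx2x_csum_fix(t_header, csum, 8) */
	__wsum extra = csum_partial(t_header - 8, 8, 0);

	return swab16((u16)~csum_fold(csum_sub(csum, extra)));
}
#endif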

static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == htons(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);

	return rc;
}
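
/*
 * Editorial sketch, not built: how the flags returned above combine. For a
 * TSO'd IPv4/TCP skb with CHECKSUM_PARTIAL, bnx2x_xmit_type() returns
 * XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4, so the transmit path takes
 * both the checksum-offload and the LSO branches. XMIT_GSO is presumably
 * the union of XMIT_GSO_V4 and XMIT_GSO_V6; the helper names below are
 * hypothetical.
 */
#if 0
u32 xmit_type = bnx2x_xmit_type(bp, skb);

if (xmit_type & XMIT_CSUM)	/* some checksum offload requested */
	example_setup_parse_bd();
if (xmit_type & XMIT_GSO)	/* LSO for either IP version */
	example_setup_lso_fields();
#endif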

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if the packet requires linearization (packet is too fragmented);
   no need to check fragmentation if page size > 8K (there will be no
   violation of the FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB */
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
				  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* a non-LSO packet that is too fragmented must
			   always be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d  hlen %d  first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
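
/*
 * Editorial worked example for the sliding-window check above, assuming
 * MAX_FETCH_BD is 13 so that wnd_size is 10: with an MSS of 1460 the FW
 * must be able to build every 1460-byte segment from at most 10 BDs. The
 * loop slides a 10-frag window across the frag list and requests
 * linearization as soon as any window sums to less than one MSS, i.e. when
 * a single segment would otherwise span more than wnd_size descriptors.
 */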

/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int i;
	u8 hlen = 0;
	__le16 pkt_size = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
	   "  gso type %x  xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation of the FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	Please read carefully. First we use one BD which we mark as start,
	then we have a parsing info BD (used for TSO or xsum),
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pbd sizes are in words - NOT DWORDS!
	*/

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = (UNICAST_ADDRESS <<
				     ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);

#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
	} else
#endif
		tx_start_bd->vlan = cpu_to_le16(pkt_prod);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data =
			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2;

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d  fix %d  csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);

	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
	tx_start_bd->nbd = cpu_to_le16(nbd);
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
	   "  nbytes %d  flags %x  vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
						 hlen, bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}
	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

		mapping = dma_map_page(&bp->pdev->dev, frag->page,
				       frag->page_offset,
				       frag->size, DMA_TO_DEVICE);

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(frag->size);
		le16_add_cpu(&pkt_size, frag->size);

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
		   "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes packets must have BDs.
	 */
	wmb();

	fp->tx_db.data.prod += nbd;
	barrier();
	DOORBELL(bp, fp->index, fp->tx_db.raw);

	mmiowb();

	fp->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);

		/* paired memory barrier is in bnx2x_tx_int(); we have to keep
		 * ordering of set_bit() in netif_tx_stop_queue() and the read
		 * of fp->bd_tx_cons */
		smp_mb();

		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}
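
/*
 * Editorial worked example of the BD accounting in bnx2x_start_xmit(): a
 * packet with a linear part and two frags starts at nbd = 4 (start BD +
 * parse BD + 2 frags). A TSO header split adds one data BD (the ++nbd at
 * the bnx2x_tx_split() call), and when the producer crosses the last slot
 * of a ring page, which holds the next-page pointer rather than a real BD,
 * the TX_BD_POFF(bd_prod) < nbd test fires and nbd is bumped once more so
 * the doorbell accounts for the skipped slot.
 */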

/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	if (!bnx2x_reset_is_done(bp)) {
		do {
			/* Reset the MCP mailbox sequence if there is an
			 * ongoing recovery
			 */
			bp->fw_seq = 0;

			/* If this is the first function to load and reset
			 * done is still not cleared, a recovery may be
			 * pending. We don't check the attention state here
			 * because it may have already been cleared by a
			 * "common" reset, but we shall proceed with
			 * "process kill" anyway.
			 */
			if ((bnx2x_get_load_cnt(bp) == 0) &&
			    bnx2x_trylock_hw_lock(bp,
			    HW_LOCK_RESOURCE_RESERVED_08) &&
			    (!bnx2x_leader_reset(bp))) {
				DP(NETIF_MSG_HW, "Recovered in open\n");
				break;
			}

			bnx2x_set_power_state(bp, PCI_D3hot);

			printk(KERN_ERR"%s: Recovery flow hasn't been properly"
			       " completed yet. Try again later. If you still"
			       " see this message after a few retries then a"
			       " power cycle is required.\n", bp->dev->name);

			return -EAGAIN;
		} while (0);
	}

	bp->recovery_state = BNX2X_RECOVERY_DONE;

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

/* called with netif_tx_lock from dev_mcast.c */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
		  CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct netdev_hw_addr *ha;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			i = 0;
			netdev_for_each_mc_addr(ha, dev) {
				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&ha->addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&ha->addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&ha->addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].target_table_entry.
					clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
				i++;
			}
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bp->set_mac_pending++;
			smp_wmb();

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct netdev_hw_addr *ha;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			netdev_for_each_mc_addr(ha, dev) {
				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   ha->addr);

				crc = crc32c_le(0, ha->addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
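
/*
 * Editorial sketch, not built: the E1H approximate-multicast hashing above
 * in isolation. The top byte of the little-endian CRC32C of the MAC selects
 * one of 256 filter bits, kept as eight 32-bit words (MC_HASH_SIZE entries).
 */
#if 0
static void example_mc_hash_set(u32 *mc_filter, const u8 *mac)
{
	u32 crc = crc32c_le(0, mac, ETH_ALEN);
	u32 bit = (crc >> 24) & 0xff;	/* bit index 0..255 */

	mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
}
#endif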

/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_eth_mac_addr_e1(bp, 1);
		else
			bnx2x_set_eth_mac_addr_e1h(bp, 1);
	}

	return 0;
}

/* called with rtnl_lock */
static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
			   int devad, u16 addr)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u16 value;
	int rc;
	u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
	   prtad, devad, addr);

	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}

	/* The HW expects a different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
			     devad, addr, &value);
	bnx2x_release_phy_lock(bp);
	DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);

	if (!rc)
		rc = value;
	return rc;
}

/* called with rtnl_lock */
static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	int rc;

	DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
	   " value 0x%x\n", prtad, devad, addr, value);

	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}

	/* The HW expects a different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
			      devad, addr, value);
	bnx2x_release_phy_lock(bp);
	return rc;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);

	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
	   mdio->phy_id, mdio->reg_num, mdio->val_in);

	if (!netif_running(dev))
		return -EAGAIN;

	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}

/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_delayed_work(&bp->reset_task, 0);
}

#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}

#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
};

static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&bp->pdev->dev,
			"Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev, "Cannot find second PCI device"
			" base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			dev_err(&bp->pdev->dev,
				"Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI Express capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
			dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
				" failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(&bp->pdev->dev,
			"System does not support DMA, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		dev_err(&bp->pdev->dev,
			"Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		dev_err(&bp->pdev->dev,
			"Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	/* Reset the load counter */
	bnx2x_clear_load_cnt(bp);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
						 int *width, int *speed)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;

	/* return value of 1=2.5GHz 2=5GHz */
	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}

static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			dev_err(&bp->pdev->dev,
				"Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			dev_err(&bp->pdev->dev,
				"Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		dev_err(&bp->pdev->dev,
			"Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
			fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
			BCM_5710_FW_MAJOR_VERSION,
			BCM_5710_FW_MINOR_VERSION,
			BCM_5710_FW_REVISION_VERSION,
			BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}

static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/*
   Ops array is stored in the following format:
   {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j + 1]);
	}
}
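
/*
 * Editorial worked example for the decode above: a first big-endian word of
 * 0x02AABBCC unpacks to op = 0x02 and offset = 0xAABBCC, and the second
 * word is the 32-bit operand, so every 8-byte firmware entry becomes one
 * struct raw_op.
 */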

static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *source = (const __be16 *)_source;
	u16 *target = (u16 *)_target;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}

#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
do { \
	u32 len = be32_to_cpu(fw_hdr->arr.len); \
	bp->arr = kmalloc(len, GFP_KERNEL); \
	if (!bp->arr) { \
		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
		goto lbl; \
	} \
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
	     (u8 *)bp->arr, len); \
} while (0)
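
/*
 * Editorial sketch, not built: one expansion of the macro above.
 * BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops)
 * becomes, roughly (error print elided):
 */
#if 0
{
	u32 len = be32_to_cpu(fw_hdr->init_ops.len);

	bp->init_ops = kmalloc(len, GFP_KERNEL);
	if (!bp->init_ops)
		goto init_ops_alloc_err;
	bnx2x_prep_ops(bp->firmware->data +
		       be32_to_cpu(fw_hdr->init_ops.offset),
		       (u8 *)bp->init_ops, len);
}
#endif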

static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else if (CHIP_IS_E1H(bp))
		fw_file_name = FW_FILE_NAME_E1H;
	else {
		dev_err(dev, "Unsupported chip revision\n");
		return -EINVAL;
	}

	dev_info(dev, "Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}


static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		dev_err(&pdev->dev, "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msg_enable = debug;

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		dev_err(&pdev->dev, "Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	pr_cont("node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* Make sure RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->reset_task);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it is OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return;
	}

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};
13536
13537static int __init bnx2x_init(void)
13538{
dd21ca6d
SG
13539 int ret;
13540
7995c64e 13541 pr_info("%s", version);
938cf541 13542
1cf167f2
EG
13543 bnx2x_wq = create_singlethread_workqueue("bnx2x");
13544 if (bnx2x_wq == NULL) {
7995c64e 13545 pr_err("Cannot create workqueue\n");
13546 return -ENOMEM;
13547 }
13548
13549 ret = pci_register_driver(&bnx2x_pci_driver);
13550 if (ret) {
7995c64e 13551 pr_err("Cannot register driver\n");
13552 destroy_workqueue(bnx2x_wq);
13553 }
13554 return ret;
13555}
13556
13557static void __exit bnx2x_cleanup(void)
13558{
13559 pci_unregister_driver(&bnx2x_pci_driver);
13560
13561 destroy_workqueue(bnx2x_wq);
13562}
13563
13564module_init(bnx2x_init);
13565module_exit(bnx2x_cleanup);
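/* Editor's note: the ordering above matters - the slowpath workqueue is
 * created before pci_register_driver() so the queue already exists by the
 * time the first probed device can schedule slowpath work, and
 * bnx2x_cleanup() tears the two down in the reverse order. */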
13566
13567#ifdef BCM_CNIC
13568
13569/* count denotes the number of new completions we have seen */
13570static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
13571{
13572 struct eth_spe *spe;
13573
13574#ifdef BNX2X_STOP_ON_ERROR
13575 if (unlikely(bp->panic))
13576 return;
13577#endif
13578
13579 spin_lock_bh(&bp->spq_lock);
13580 bp->cnic_spq_pending -= count;
13581
13582 for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
13583 bp->cnic_spq_pending++) {
13584
13585 if (!bp->cnic_kwq_pending)
13586 break;
13587
13588 spe = bnx2x_sp_get_next(bp);
13589 *spe = *bp->cnic_kwq_cons;
13590
13591 bp->cnic_kwq_pending--;
13592
13593 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
13594 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
13595
13596 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
13597 bp->cnic_kwq_cons = bp->cnic_kwq;
13598 else
13599 bp->cnic_kwq_cons++;
13600 }
13601 bnx2x_sp_prod_update(bp);
13602 spin_unlock_bh(&bp->spq_lock);
13603}
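/* Editor's note: the cons-pointer handling above is the usual bounded
 * ring advance.  A standalone sketch of the same idiom (the helper name
 * is an illustrative assumption, not driver code):
 *
 *	static inline struct eth_spe *spe_advance(struct eth_spe *p,
 *						  struct eth_spe *base,
 *						  struct eth_spe *last)
 *	{
 *		return (p == last) ? base : p + 1;
 *	}
 */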
13604
13605static int bnx2x_cnic_sp_queue(struct net_device *dev,
13606 struct kwqe_16 *kwqes[], u32 count)
13607{
13608 struct bnx2x *bp = netdev_priv(dev);
13609 int i;
13610
13611#ifdef BNX2X_STOP_ON_ERROR
13612 if (unlikely(bp->panic))
13613 return -EIO;
13614#endif
13615
13616 spin_lock_bh(&bp->spq_lock);
13617
13618 for (i = 0; i < count; i++) {
13619 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
13620
13621 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
13622 break;
13623
13624 *bp->cnic_kwq_prod = *spe;
13625
13626 bp->cnic_kwq_pending++;
13627
13628 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
13629 spe->hdr.conn_and_cmd_data, spe->hdr.type,
13630 spe->data.mac_config_addr.hi,
13631 spe->data.mac_config_addr.lo,
13632 bp->cnic_kwq_pending);
13633
13634 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
13635 bp->cnic_kwq_prod = bp->cnic_kwq;
13636 else
13637 bp->cnic_kwq_prod++;
13638 }
13639
13640 spin_unlock_bh(&bp->spq_lock);
13641
13642 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
13643 bnx2x_cnic_sp_post(bp, 0);
13644
13645 return i;
13646}
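/* Editor's note: a hedged caller-side sketch of this hook.  The return
 * value is the number of kwqes actually accepted, so a caller (the cnic
 * module in practice; 'n' and the retry comment are assumptions) would
 * do something like:
 *
 *	int done = cp->drv_submit_kwqes_16(dev, kwqes, n);
 *	if (done < n)
 *		;	/ * ring full - requeue kwqes[done..n-1] later * /
 */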
13647
13648static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
13649{
13650 struct cnic_ops *c_ops;
13651 int rc = 0;
13652
13653 mutex_lock(&bp->cnic_mutex);
13654 c_ops = bp->cnic_ops;
13655 if (c_ops)
13656 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
13657 mutex_unlock(&bp->cnic_mutex);
13658
13659 return rc;
13660}
13661
13662static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
13663{
13664 struct cnic_ops *c_ops;
13665 int rc = 0;
13666
13667 rcu_read_lock();
13668 c_ops = rcu_dereference(bp->cnic_ops);
13669 if (c_ops)
13670 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
13671 rcu_read_unlock();
13672
13673 return rc;
13674}
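/* Editor's note: two send variants exist because of calling context -
 * bnx2x_cnic_ctl_send() takes cnic_mutex and may sleep, while the _bh
 * version uses rcu_read_lock() so it can be called from softirq/BH
 * context (e.g. completion paths) where sleeping is forbidden. */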
13675
13676/*
13677 * for commands that have no data
13678 */
13679static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
13680{
13681 struct cnic_ctl_info ctl = {0};
13682
13683 ctl.cmd = cmd;
13684
13685 return bnx2x_cnic_ctl_send(bp, &ctl);
13686}
13687
13688static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
13689{
13690 struct cnic_ctl_info ctl;
13691
13692 /* first we tell CNIC and only then we count this as a completion */
13693 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
13694 ctl.data.comp.cid = cid;
13695
13696 bnx2x_cnic_ctl_send_bh(bp, &ctl);
13697 bnx2x_cnic_sp_post(bp, 1);
13698}
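/* Editor's note: the order above is deliberate - CNIC is told about the
 * CFC-delete completion first, and only then is the slot returned to the
 * SPQ accounting via bnx2x_cnic_sp_post(); counting it first could,
 * presumably, let a new slowpath element be posted before CNIC has
 * processed the event. */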
13699
13700static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
13701{
13702 struct bnx2x *bp = netdev_priv(dev);
13703 int rc = 0;
13704
13705 switch (ctl->cmd) {
13706 case DRV_CTL_CTXTBL_WR_CMD: {
13707 u32 index = ctl->data.io.offset;
13708 dma_addr_t addr = ctl->data.io.dma_addr;
13709
13710 bnx2x_ilt_wr(bp, index, addr);
13711 break;
13712 }
13713
13714 case DRV_CTL_COMPLETION_CMD: {
13715 int count = ctl->data.comp.comp_count;
13716
13717 bnx2x_cnic_sp_post(bp, count);
13718 break;
13719 }
13720
13721 /* rtnl_lock is held. */
13722 case DRV_CTL_START_L2_CMD: {
13723 u32 cli = ctl->data.ring.client_id;
13724
13725 bp->rx_mode_cl_mask |= (1 << cli);
13726 bnx2x_set_storm_rx_mode(bp);
13727 break;
13728 }
13729
13730 /* rtnl_lock is held. */
13731 case DRV_CTL_STOP_L2_CMD: {
13732 u32 cli = ctl->data.ring.client_id;
13733
13734 bp->rx_mode_cl_mask &= ~(1 << cli);
13735 bnx2x_set_storm_rx_mode(bp);
13736 break;
13737 }
13738
13739 default:
13740 BNX2X_ERR("unknown command %x\n", ctl->cmd);
13741 rc = -EINVAL;
13742 }
13743
13744 return rc;
13745}
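/* Editor's note: a hedged sketch of how a CNIC-side caller would use
 * this entry point, e.g. to return 'done' completions ('done' is an
 * illustrative variable, not driver code):
 *
 *	struct drv_ctl_info info = { .cmd = DRV_CTL_COMPLETION_CMD };
 *
 *	info.data.comp.comp_count = done;
 *	cp->drv_ctl(dev, &info);
 */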
13746
13747static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
13748{
13749 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13750
13751 if (bp->flags & USING_MSIX_FLAG) {
13752 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
13753 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
13754 cp->irq_arr[0].vector = bp->msix_table[1].vector;
13755 } else {
13756 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
13757 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
13758 }
13759 cp->irq_arr[0].status_blk = bp->cnic_sb;
13760 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
13761 cp->irq_arr[1].status_blk = bp->def_status_blk;
13762 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
13763
13764 cp->num_irq = 2;
13765}
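/* Editor's note: when MSI-X is active, msix_table[1] (the CNIC status
 * block vector) is handed to the CNIC module as its irq_arr[0], while
 * vector 0 stays with the driver; irq_arr[1] always points at the
 * default status block so CNIC can read slowpath indices either way. */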
13766
13767static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
13768 void *data)
13769{
13770 struct bnx2x *bp = netdev_priv(dev);
13771 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13772
13773 if (ops == NULL)
13774 return -EINVAL;
13775
13776 if (atomic_read(&bp->intr_sem) != 0)
13777 return -EBUSY;
13778
13779 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
13780 if (!bp->cnic_kwq)
13781 return -ENOMEM;
13782
13783 bp->cnic_kwq_cons = bp->cnic_kwq;
13784 bp->cnic_kwq_prod = bp->cnic_kwq;
13785 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
13786
13787 bp->cnic_spq_pending = 0;
13788 bp->cnic_kwq_pending = 0;
13789
13790 bp->cnic_data = data;
13791
13792 cp->num_irq = 0;
13793 cp->drv_state = CNIC_DRV_STATE_REGD;
13794
13795 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
13796
13797 bnx2x_setup_cnic_irq_info(bp);
13798 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
13799 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
13800 rcu_assign_pointer(bp->cnic_ops, ops);
13801
13802 return 0;
13803}
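/* Editor's note: rcu_assign_pointer() is done last on purpose - every
 * field CNIC may touch (kwq ring, IRQ info, iSCSI MAC) is fully set up
 * before readers that only hold rcu_read_lock() can observe a non-NULL
 * bp->cnic_ops. */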
13804
13805static int bnx2x_unregister_cnic(struct net_device *dev)
13806{
13807 struct bnx2x *bp = netdev_priv(dev);
13808 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13809
13810 mutex_lock(&bp->cnic_mutex);
13811 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
13812 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
13813 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
13814 }
13815 cp->drv_state = 0;
13816 rcu_assign_pointer(bp->cnic_ops, NULL);
13817 mutex_unlock(&bp->cnic_mutex);
13818 synchronize_rcu();
13819 kfree(bp->cnic_kwq);
13820 bp->cnic_kwq = NULL;
13821
13822 return 0;
13823}
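/* Editor's note: the unregister path is the classic RCU retire pattern -
 * publish NULL, wait out a grace period with synchronize_rcu() so no
 * bnx2x_cnic_ctl_send_bh() reader still holds the old ops pointer, and
 * only then free the kwq memory those callbacks might have used. */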
13824
13825struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
13826{
13827 struct bnx2x *bp = netdev_priv(dev);
13828 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13829
13830 cp->drv_owner = THIS_MODULE;
13831 cp->chip_id = CHIP_ID(bp);
13832 cp->pdev = bp->pdev;
13833 cp->io_base = bp->regview;
13834 cp->io_base2 = bp->doorbells;
13835 cp->max_kwqe_pending = 8;
13836 cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
13837 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
13838 cp->ctx_tbl_len = CNIC_ILT_LINES;
13839 cp->starting_cid = BCM_CNIC_CID_START;
13840 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
13841 cp->drv_ctl = bnx2x_drv_ctl;
13842 cp->drv_register_cnic = bnx2x_register_cnic;
13843 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
13844
13845 return cp;
13846}
13847EXPORT_SYMBOL(bnx2x_cnic_probe);
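/* Editor's note: a hedged sketch of the handshake as seen from a CNIC
 * consumer ('my_ops'/'my_data' are illustrative assumptions; the real
 * flow lives in the cnic module):
 *
 *	struct cnic_eth_dev *cp = bnx2x_cnic_probe(netdev);
 *
 *	if (cp && cp->drv_register_cnic(netdev, &my_ops, my_data) == 0) {
 *		/ * cp->drv_submit_kwqes_16() etc. may be used from here
 *		  * on, until cp->drv_unregister_cnic(netdev) is called. * /
 *	}
 */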
13848
13849#endif /* BCM_CNIC */
94a78b79 13850