/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>

#define BNX2X_MAIN
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"


#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x-e1h-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
				" (default is as a number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
				"(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

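/* Companion to bnx2x_reg_wr_ind(): read a GRC register indirectly
 * through the PCI config space window, restoring the window afterwards.
 */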
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

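/* DMA len32 dwords from host memory (dma_addr) into device GRC space
 * (dst_addr).  Falls back to indirect register writes while DMAE is not
 * yet ready, and polls the write-back completion word under dmae_mutex.
 */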
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   "  using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

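/* Mirror of bnx2x_write_dmae(): DMA len32 dwords from GRC space
 * (src_addr) into the slowpath wb_data buffer, reading dword by dword
 * while DMAE is not yet ready.
 */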
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   "  using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

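/* Split a write that exceeds the per-command DMAE length limit into
 * DMAE_LEN32_WR_MAX(bp)-sized chunks.
 */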
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

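/* Scan the assert lists of the four STORM processors in internal
 * memory and log every valid entry; returns the number of asserts
 * found.
 */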
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

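/* Dump the MCP scratchpad (the bootcode trace buffer) to the kernel
 * log, printing from the last mark and wrapping around.
 */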
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 addr;
	u32 mark, offset;
	__be32 data[9];
	int word;

	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}

	addr = bp->common.shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}

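/* Dump driver state around a panic: status block indices, Rx/Tx ring
 * pointers and descriptors for every queue, followed by the firmware
 * trace and the STORM assert lists.
 */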
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(0x%x)  def_u_idx(0x%x)  def_x_idx(0x%x)"
		  "  def_t_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
		  "  spq_prod_idx(0x%x)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
			  "  *rx_bd_cons_sb(0x%x)  rx_comp_prod(0x%x)"
			  "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
			  "  fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
			  "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
			  "  *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("     fp_c_idx(0x%x)  *sb_c_idx(0x%x)"
			  "  tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

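/* Program the HC configuration register for the active interrupt mode
 * (MSI-X, MSI or INTx) and, on E1H, the leading/trailing edge masks.
 */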
void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

/* Returns true if the lock was acquired successfully */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		/* was "return -EINVAL", which reads as true in a bool
		 * context and would falsely signal lock acquisition */
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
	return false;
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

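/* Ramrod (slow path command) completions arrive here on the fastpath
 * CQ ring; advance the fastpath/driver state machine accordingly.
 */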
void bnx2x_sp_event(struct bnx2x_fastpath *fp,
		    union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp[%d] state is %x\n",
				  command, fp->index, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

#ifdef BCM_CNIC
	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		break;
#endif

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

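/* INTx/MSI interrupt handler: ack the interrupt, schedule NAPI for
 * every fastpath whose status block bit is set, and kick the slow path
 * task for the attention bit.
 */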
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << fp->sb_id;
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(&fp->status_blk->u_status_block.
						status_block_index);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->status_blk->c_status_block.
						status_block_index);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2 << CNIC_SB_ID(bp);
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */


/* Link */

/*
 * General service functions
 */

int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds, every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}

int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);

	return value;
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

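/* Translate the negotiated IEEE pause settings into the ethtool
 * Pause/Asym_Pause advertising bits reported for the port.
 */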
void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}

u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}

void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}

u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc = 0;

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not test link\n");

	return rc;
}

static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur
	   1.25 coefficient is for the threshold to be a little bigger
	   than the real time, to compensate for timer inaccuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
     In the latter case the fairness algorithm should be deactivated.
     If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int func = 2*vn + port;
		u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero) {
		bp->cmng.flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
		   "  fairness will be disabled\n");
	} else
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}

static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}
	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max_t(u32, (vn_min_rate * (T_FAIR_COEF /
						   (8 * bp->vn_weight_sum))),
			      (bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}

/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	u32 prev_link_status = bp->link_vars.link_status;
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if (bp->state == BNX2X_STATE_OPEN)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status only if link status actually changed */
	if (prev_link_status != bp->link_vars.link_status)
		bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}

void bnx2x__link_status_update(struct bnx2x *bp)
{
	if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_calc_vn_weight_sum(bp);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* send the MCP a request, block until there is a reply */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	mutex_lock(&bp->fw_mb_mutex);
	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 5 seconds (500*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}

static void bnx2x_e1h_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	netif_tx_disable(bp->dev);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

	netif_carrier_off(bp->dev);
}

static void bnx2x_e1h_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);

	/* Tx queue should be only reenabled */
	netif_tx_wake_all_queues(bp->dev);

	/*
	 * Should not call netif_carrier_on since it will be called if the link
	 * is up when checking for link state
	 */
}

static void bnx2x_update_min_max(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int vn, i;

	/* Init rate shaping and fairness contexts */
	bnx2x_init_port_minmax(bp);

	bnx2x_calc_vn_weight_sum(bp);

	for (vn = VN_0; vn < E1HVN_MAX; vn++)
		bnx2x_init_vn_minmax(bp, 2*vn + port);

	if (bp->port.pmf) {
		int func;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		/* Store it to internal memory */
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
			       ((u32 *)(&bp->cmng))[i]);
	}
}

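/* Handle a Driver Configuration Change (DCC) notification from the
 * MCP: enable/disable the PF and/or refresh min/max bandwidth, then
 * ack the event back to the firmware.
 */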
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);

	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {

		/*
		 * This is the only place besides the function initialization
		 * where the bp->flags can change so it is done without any
		 * locks
		 */
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;

			bnx2x_e1h_disable(bp);
		} else {
			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
			bp->flags &= ~MF_FUNC_DIS;

			bnx2x_e1h_enable(bp);
		}
		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
	}
	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {

		bnx2x_update_min_max(bp);
		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
	}

	/* Report results to MCP */
	if (dcc_event)
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
	else
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
}

/* must be called under the spq lock */
static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
{
	struct eth_spe *next_spe = bp->spq_prod_bd;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");
	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}
	return next_spe;
}

/* must be called under the spq lock */
static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
	mmiowb();
}
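
/*
 * Illustrative sketch, not part of the driver: the wmb()/REG_WR pairing
 * above is the usual "fill the descriptor, then publish the producer"
 * idiom. A hypothetical condensed form of the same ordering:
 */
#if 0
static void example_publish_bd(struct bnx2x *bp, struct eth_spe *bd)
{
	bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);  /* 1: fill the BD */
	wmb();			/* 2: make the BD visible before the index */
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(BP_FUNC(bp)),
	       ++bp->spq_prod_idx);	/* 3: hand the BD to the chip */
	mmiowb();		/* 4: order the MMIO write before unlock */
}
#endif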

/* the slow path queue is odd since completions arrive on the fastpath ring */
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
		  u32 data_hi, u32 data_lo, int common)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	spe = bnx2x_sp_get_next(bp);

	/* CID needs port number to be encoded in it */
	spe->hdr.conn_and_cmd_data =
			cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
				    HW_CID(bp, cid));
	spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		spe->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
	   bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
	   (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
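
/*
 * Illustrative sketch, not part of the driver: a typical ramrod post. The
 * statistics code later in this file posts exactly this command; only the
 * wrapper function here is hypothetical.
 */
#if 0
static int example_post_stat_query(struct bnx2x *bp, u32 data_hi, u32 data_lo)
{
	/* cid 0, last argument 0: not a "common" (port-wide) ramrod */
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
			     data_hi, data_lo, 0);
}
#endif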

/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 j, val;
	int rc = 0;

	might_sleep();
	for (j = 0; j < 1000; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}

/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
}

static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}
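
/*
 * Illustrative note, not part of the driver: the value returned above is a
 * bitmask of which status-block indices moved - bit 0 attention, bit 1
 * CSTORM, bit 2 USTORM, bit 3 XSTORM, bit 4 TSTORM. bnx2x_sp_task() below
 * consumes it bit by bit:
 */
#if 0
	u16 status = bnx2x_update_dsb_idx(bp);

	if (status & 0x1)	/* attention bits changed */
		bnx2x_attn_int(bp);
	if (status & 0x2)	/* CStorm event, e.g. STAT_QUERY completion */
		/* ... */;
#endif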

/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}

static inline void bnx2x_fan_failure(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	/* mark the failure */
	bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
	bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
		 bp->link_params.ext_phy_config);

	/* log the failure */
	netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
		   " the driver to shutdown the card to prevent permanent"
		   " damage. Please contact OEM Support for assistance\n");
}

static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val, swap_val, swap_override;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		/* Fan failure attention */
		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			/* The PHY reset is controlled by GPIO 1 */
			/* fake the port number to cancel the swap done in
			   set_gpio() */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			port = (swap_val && swap_override) ^ 1;
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		default:
			break;
		}
		bnx2x_fan_failure(bp);
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bp->mf_config = SHMEM_RD(bp,
					    mf_cfg.func_mf_config[func].config);
			val = SHMEM_RD(bp, func_mb[func].drv_status);
			if (val & DRV_STATUS_DCC_EVENT_MASK)
				bnx2x_dcc_event(bp,
					    (val & DRV_STATUS_DCC_EVENT_MASK));
			bnx2x__link_status_update(bp);
			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}

#define BNX2X_MISC_GEN_REG	MISC_REG_GENERIC_POR_1
#define LOAD_COUNTER_BITS	16 /* Number of bits for load counter */
#define LOAD_COUNTER_MASK	(((u32)0x1 << LOAD_COUNTER_BITS) - 1)
#define RESET_DONE_FLAG_MASK	(~LOAD_COUNTER_MASK)
#define RESET_DONE_FLAG_SHIFT	LOAD_COUNTER_BITS
#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
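
/*
 * Illustrative note, not part of the driver: the generic-purpose register
 * holds a 16-bit per-chip load counter in bits 15:0 and a reset flag at
 * bit 16; the flag is set while a global reset is in progress and cleared
 * once it is done. Decoding a hypothetical value:
 */
#if 0
	u32 val = 0x00010003;				/* example value */
	u32 load_cnt = val & LOAD_COUNTER_MASK;		/* 3 functions up */
	bool reset_done = !(val & RESET_DONE_FLAG_MASK);	/* false */
#endif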
/*
 * should be run under rtnl lock
 */
static inline void bnx2x_set_reset_done(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
	val &= ~(1 << RESET_DONE_FLAG_SHIFT);
	REG_WR(bp, BNX2X_MISC_GEN_REG, val);
	barrier();
	mmiowb();
}

/*
 * should be run under rtnl lock
 */
static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
	val |= (1 << 16);
	REG_WR(bp, BNX2X_MISC_GEN_REG, val);
	barrier();
	mmiowb();
}

/*
 * should be run under rtnl lock
 */
bool bnx2x_reset_is_done(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
	DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
	return (val & RESET_DONE_FLAG_MASK) ? false : true;
}

/*
 * should be run under rtnl lock
 */
inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
{
	u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);

	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);

	val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
	REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
	barrier();
	mmiowb();
}

/*
 * should be run under rtnl lock
 */
u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
{
	u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);

	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);

	val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
	REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
	barrier();
	mmiowb();

	return val1;
}

/*
 * should be run under rtnl lock
 */
static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
{
	return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
}

static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
	REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
}

static inline void _print_next_block(int idx, const char *blk)
{
	if (idx)
		pr_cont(", ");
	pr_cont("%s", blk);
}
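
/*
 * Illustrative note, not part of the driver: par_num is threaded through
 * the bnx2x_print_blocks_with_parity*() helpers below so that the block
 * names come out as a single comma-separated list, e.g.:
 *
 *	eth0: Parity errors detected in blocks: BRB, TSDM, CSEMI
 */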

static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
{
	int i = 0;
	u32 cur_bit = 0;
	for (i = 0; sig; i++) {
		cur_bit = ((u32)0x1 << i);
		if (sig & cur_bit) {
			switch (cur_bit) {
			case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
				_print_next_block(par_num++, "BRB");
				break;
			case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
				_print_next_block(par_num++, "PARSER");
				break;
			case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
				_print_next_block(par_num++, "TSDM");
				break;
			case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
				_print_next_block(par_num++, "SEARCHER");
				break;
			case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
				_print_next_block(par_num++, "TSEMI");
				break;
			}

			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return par_num;
}

static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
{
	int i = 0;
	u32 cur_bit = 0;
	for (i = 0; sig; i++) {
		cur_bit = ((u32)0x1 << i);
		if (sig & cur_bit) {
			switch (cur_bit) {
			case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
				_print_next_block(par_num++, "PBCLIENT");
				break;
			case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
				_print_next_block(par_num++, "QM");
				break;
			case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
				_print_next_block(par_num++, "XSDM");
				break;
			case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
				_print_next_block(par_num++, "XSEMI");
				break;
			case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
				_print_next_block(par_num++, "DOORBELLQ");
				break;
			case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
				_print_next_block(par_num++, "VAUX PCI CORE");
				break;
			case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
				_print_next_block(par_num++, "DEBUG");
				break;
			case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
				_print_next_block(par_num++, "USDM");
				break;
			case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
				_print_next_block(par_num++, "USEMI");
				break;
			case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
				_print_next_block(par_num++, "UPB");
				break;
			case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
				_print_next_block(par_num++, "CSDM");
				break;
			}

			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return par_num;
}

static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
{
	int i = 0;
	u32 cur_bit = 0;
	for (i = 0; sig; i++) {
		cur_bit = ((u32)0x1 << i);
		if (sig & cur_bit) {
			switch (cur_bit) {
			case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
				_print_next_block(par_num++, "CSEMI");
				break;
			case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
				_print_next_block(par_num++, "PXP");
				break;
			case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
				_print_next_block(par_num++,
					"PXPPCICLOCKCLIENT");
				break;
			case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
				_print_next_block(par_num++, "CFC");
				break;
			case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
				_print_next_block(par_num++, "CDU");
				break;
			case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
				_print_next_block(par_num++, "IGU");
				break;
			case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
				_print_next_block(par_num++, "MISC");
				break;
			}

			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return par_num;
}

static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
{
	int i = 0;
	u32 cur_bit = 0;
	for (i = 0; sig; i++) {
		cur_bit = ((u32)0x1 << i);
		if (sig & cur_bit) {
			switch (cur_bit) {
			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
				_print_next_block(par_num++, "MCP ROM");
				break;
			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
				_print_next_block(par_num++, "MCP UMP RX");
				break;
			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
				_print_next_block(par_num++, "MCP UMP TX");
				break;
			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
				_print_next_block(par_num++, "MCP SCPAD");
				break;
			}

			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return par_num;
}

static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
				     u32 sig2, u32 sig3)
{
	if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
	    (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
		int par_num = 0;
		DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
			"[0]:0x%08x [1]:0x%08x "
			"[2]:0x%08x [3]:0x%08x\n",
			sig0 & HW_PRTY_ASSERT_SET_0,
			sig1 & HW_PRTY_ASSERT_SET_1,
			sig2 & HW_PRTY_ASSERT_SET_2,
			sig3 & HW_PRTY_ASSERT_SET_3);
		printk(KERN_ERR"%s: Parity errors detected in blocks: ",
		       bp->dev->name);
		par_num = bnx2x_print_blocks_with_parity0(
			sig0 & HW_PRTY_ASSERT_SET_0, par_num);
		par_num = bnx2x_print_blocks_with_parity1(
			sig1 & HW_PRTY_ASSERT_SET_1, par_num);
		par_num = bnx2x_print_blocks_with_parity2(
			sig2 & HW_PRTY_ASSERT_SET_2, par_num);
		par_num = bnx2x_print_blocks_with_parity3(
			sig3 & HW_PRTY_ASSERT_SET_3, par_num);
		printk("\n");
		return true;
	} else
		return false;
}

bool bnx2x_chk_parity_attn(struct bnx2x *bp)
{
	struct attn_route attn;
	int port = BP_PORT(bp);

	attn.sig[0] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
		port*4);
	attn.sig[1] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
		port*4);
	attn.sig[2] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
		port*4);
	attn.sig[3] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
		port*4);

	return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
				 attn.sig[3]);
}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn, *group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	if (bnx2x_chk_parity_attn(bp)) {
		bp->recovery_state = BNX2X_RECOVERY_INIT;
		bnx2x_set_reset_in_progress(bp);
		schedule_delayed_work(&bp->reset_task, 0);
		/* Disable HW interrupts */
		bnx2x_int_disable(bp);
		bnx2x_release_alr(bp);
		/* In case of parity errors don't handle attentions so that
		 * other function would "see" parity errors.
		 */
		return;
	}

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = &bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask->sig[0], group_mask->sig[1],
			   group_mask->sig[2], group_mask->sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask->sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask->sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask->sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask->sig[0]);
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
				    attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
				   attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted = attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits & attn_ack & attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}

static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);

	/* HW attentions */
	if (status & 0x1) {
		bnx2x_attn_int(bp);
		status &= ~0x1;
	}

	/* CStorm events: STAT_QUERY */
	if (status & 0x2) {
		DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
		status &= ~0x2;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}

irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

#ifdef BCM_CNIC
	{
		struct cnic_ops *c_ops;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();
	}
#endif
	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}

/* end of slow path */

/* Statistics */

/****************************************************************************
* Macros
****************************************************************************/

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)
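
/*
 * Illustrative sketch, not part of the driver: ADD_64/DIFF_64 emulate
 * 64-bit arithmetic on statistics kept as split hi/lo u32 words. The
 * values below are hypothetical.
 */
#if 0
	u32 s_hi = 0, s_lo = 0xffffffff;
	u32 d_hi, d_lo;

	ADD_64(s_hi, 0, s_lo, 1);
	/* s_lo wrapped below the addend, so a carry was added:
	   s_hi == 1, s_lo == 0 */

	DIFF_64(d_hi, 1, 0, d_lo, 0, 1);	/* 0x1_00000000 - 1 */
	/* m_lo < s_lo and d_hi > 0, so 1 is "loaned" from the high word:
	   d_hi == 0, d_lo == 0xffffffff */
#endif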

#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
		old_tclient->s = tclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		old_uclient->s = uclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
		old_xclient->s = xclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)

/* minuend[hi:lo] -= subtrahend */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
	do { \
		SUB_64(m_hi, 0, m_lo, s); \
	} while (0)

#define SUB_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/*
 * General service functions
 */

static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}
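
/*
 * Illustrative note, not part of the driver: a statistic stored as adjacent
 * hi/lo words is read by pointing bnx2x_hilo() at the hi word; on a 32-bit
 * kernel only the low 32 bits are reported. The field name below is an
 * assumption for the sake of the example.
 */
#if 0
	long rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
#endif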

/*
 * Init service functions
 */

static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int i, rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
		for_each_queue(bp, i)
			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}

static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}

static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}

/*
 * Statistics service functions
 */

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}

static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	if (!BP_NOMCP(bp)) {
		u32 nig_timer_max =
			SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
		if (nig_timer_max != estats->nig_timer_max) {
			estats->nig_timer_max = nig_timer_max;
			BNX2X_ERR("NIG timer max (%u)\n",
				  estats->nig_timer_max);
		}
	}

	return 0;
}

static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	memcpy(&(fstats->total_bytes_received_hi),
	       &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;

		/* are storm stats valid? */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   " xstorm counter (0x%x) != stats_counter (0x%x)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   " tstorm counter (0x%x) != stats_counter (0x%x)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   " ustorm counter (0x%x) != stats_counter (0x%x)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		qstats->total_bytes_received_hi =
			le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
		qstats->total_bytes_received_lo =
			le32_to_cpu(tclient->rcv_broadcast_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_multicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_multicast_bytes.lo));

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_unicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_unicast_bytes.lo));

		SUB_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(uclient->bcast_no_buff_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(uclient->bcast_no_buff_bytes.lo));

		SUB_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(uclient->mcast_no_buff_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(uclient->mcast_no_buff_bytes.lo));

		SUB_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(uclient->ucast_no_buff_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(uclient->ucast_no_buff_bytes.lo));

		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);
bb2a0f7a 3460
de832a55
EG
3461 ADD_64(qstats->total_bytes_received_hi,
3462 qstats->error_bytes_received_hi,
3463 qstats->total_bytes_received_lo,
3464 qstats->error_bytes_received_lo);
3465
3466 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3467 total_unicast_packets_received);
3468 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3469 total_multicast_packets_received);
3470 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3471 total_broadcast_packets_received);
3472 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3473 etherstatsoverrsizepkts);
3474 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3475
3476 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3477 total_unicast_packets_received);
3478 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3479 total_multicast_packets_received);
3480 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3481 total_broadcast_packets_received);
3482 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3483 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3484 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3485
3486 qstats->total_bytes_transmitted_hi =
ca00392c 3487 le32_to_cpu(xclient->unicast_bytes_sent.hi);
de832a55 3488 qstats->total_bytes_transmitted_lo =
ca00392c
EG
3489 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3490
3491 ADD_64(qstats->total_bytes_transmitted_hi,
3492 le32_to_cpu(xclient->multicast_bytes_sent.hi),
3493 qstats->total_bytes_transmitted_lo,
3494 le32_to_cpu(xclient->multicast_bytes_sent.lo));
3495
3496 ADD_64(qstats->total_bytes_transmitted_hi,
3497 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
3498 qstats->total_bytes_transmitted_lo,
3499 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
bb2a0f7a 3500
de832a55
EG
3501 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3502 total_unicast_packets_transmitted);
3503 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3504 total_multicast_packets_transmitted);
3505 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3506 total_broadcast_packets_transmitted);
3507
3508 old_tclient->checksum_discard = tclient->checksum_discard;
3509 old_tclient->ttl0_discard = tclient->ttl0_discard;
3510
3511 ADD_64(fstats->total_bytes_received_hi,
3512 qstats->total_bytes_received_hi,
3513 fstats->total_bytes_received_lo,
3514 qstats->total_bytes_received_lo);
3515 ADD_64(fstats->total_bytes_transmitted_hi,
3516 qstats->total_bytes_transmitted_hi,
3517 fstats->total_bytes_transmitted_lo,
3518 qstats->total_bytes_transmitted_lo);
3519 ADD_64(fstats->total_unicast_packets_received_hi,
3520 qstats->total_unicast_packets_received_hi,
3521 fstats->total_unicast_packets_received_lo,
3522 qstats->total_unicast_packets_received_lo);
3523 ADD_64(fstats->total_multicast_packets_received_hi,
3524 qstats->total_multicast_packets_received_hi,
3525 fstats->total_multicast_packets_received_lo,
3526 qstats->total_multicast_packets_received_lo);
3527 ADD_64(fstats->total_broadcast_packets_received_hi,
3528 qstats->total_broadcast_packets_received_hi,
3529 fstats->total_broadcast_packets_received_lo,
3530 qstats->total_broadcast_packets_received_lo);
3531 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3532 qstats->total_unicast_packets_transmitted_hi,
3533 fstats->total_unicast_packets_transmitted_lo,
3534 qstats->total_unicast_packets_transmitted_lo);
3535 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3536 qstats->total_multicast_packets_transmitted_hi,
3537 fstats->total_multicast_packets_transmitted_lo,
3538 qstats->total_multicast_packets_transmitted_lo);
3539 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3540 qstats->total_broadcast_packets_transmitted_hi,
3541 fstats->total_broadcast_packets_transmitted_lo,
3542 qstats->total_broadcast_packets_transmitted_lo);
3543 ADD_64(fstats->valid_bytes_received_hi,
3544 qstats->valid_bytes_received_hi,
3545 fstats->valid_bytes_received_lo,
3546 qstats->valid_bytes_received_lo);
3547
3548 ADD_64(estats->error_bytes_received_hi,
3549 qstats->error_bytes_received_hi,
3550 estats->error_bytes_received_lo,
3551 qstats->error_bytes_received_lo);
3552 ADD_64(estats->etherstatsoverrsizepkts_hi,
3553 qstats->etherstatsoverrsizepkts_hi,
3554 estats->etherstatsoverrsizepkts_lo,
3555 qstats->etherstatsoverrsizepkts_lo);
3556 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3557 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3558 }
3559
3560 ADD_64(fstats->total_bytes_received_hi,
3561 estats->rx_stat_ifhcinbadoctets_hi,
3562 fstats->total_bytes_received_lo,
3563 estats->rx_stat_ifhcinbadoctets_lo);
bb2a0f7a
YG
3564
3565 memcpy(estats, &(fstats->total_bytes_received_hi),
3566 sizeof(struct host_func_stats) - 2*sizeof(u32));
3567
de832a55
EG
3568 ADD_64(estats->etherstatsoverrsizepkts_hi,
3569 estats->rx_stat_dot3statsframestoolong_hi,
3570 estats->etherstatsoverrsizepkts_lo,
3571 estats->rx_stat_dot3statsframestoolong_lo);
3572 ADD_64(estats->error_bytes_received_hi,
3573 estats->rx_stat_ifhcinbadoctets_hi,
3574 estats->error_bytes_received_lo,
3575 estats->rx_stat_ifhcinbadoctets_lo);
3576
3577 if (bp->port.pmf) {
3578 estats->mac_filter_discard =
3579 le32_to_cpu(tport->mac_filter_discard);
3580 estats->xxoverflow_discard =
3581 le32_to_cpu(tport->xxoverflow_discard);
3582 estats->brb_truncate_discard =
bb2a0f7a 3583 le32_to_cpu(tport->brb_truncate_discard);
de832a55
EG
3584 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3585 }
bb2a0f7a
YG
3586
3587 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
a2fbb9ea 3588
de832a55
EG
3589 bp->stats_pending = 0;
3590
a2fbb9ea
ET
3591 return 0;
3592}
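/*
 * Sketch of the freshness test applied to each storm above
 * (hypothetical helper, not driver code): the storm firmware posts a
 * little-endian sequence counter with every statistics snapshot, and a
 * snapshot is current only when it is exactly one behind the driver's
 * own counter.  Doing the comparison in u16 handles wraparound for free.
 */
static inline bool storm_stats_fresh(__le16 storm_counter, u16 drv_counter)
{
	return (u16)(le16_to_cpu(storm_counter) + 1) == drv_counter;
}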
3593
bb2a0f7a 3594static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 3595{
bb2a0f7a 3596 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 3597 struct net_device_stats *nstats = &bp->dev->stats;
de832a55 3598 int i;
a2fbb9ea
ET
3599
3600 nstats->rx_packets =
3601 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3602 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3603 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3604
3605 nstats->tx_packets =
3606 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3607 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3608 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3609
de832a55 3610 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
a2fbb9ea 3611
0e39e645 3612 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 3613
de832a55 3614 nstats->rx_dropped = estats->mac_discard;
54b9ddaa 3615 for_each_queue(bp, i)
de832a55
EG
3616 nstats->rx_dropped +=
3617 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3618
a2fbb9ea
ET
3619 nstats->tx_dropped = 0;
3620
3621 nstats->multicast =
de832a55 3622 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
a2fbb9ea 3623
bb2a0f7a 3624 nstats->collisions =
de832a55 3625 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
bb2a0f7a
YG
3626
3627 nstats->rx_length_errors =
de832a55
EG
3628 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3629 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3630 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3631 bnx2x_hilo(&estats->brb_truncate_hi);
3632 nstats->rx_crc_errors =
3633 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3634 nstats->rx_frame_errors =
3635 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3636 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
a2fbb9ea
ET
3637 nstats->rx_missed_errors = estats->xxoverflow_discard;
3638
3639 nstats->rx_errors = nstats->rx_length_errors +
3640 nstats->rx_over_errors +
3641 nstats->rx_crc_errors +
3642 nstats->rx_frame_errors +
0e39e645
ET
3643 nstats->rx_fifo_errors +
3644 nstats->rx_missed_errors;
a2fbb9ea 3645
bb2a0f7a 3646 nstats->tx_aborted_errors =
de832a55
EG
3647 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3648 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3649 nstats->tx_carrier_errors =
3650 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
a2fbb9ea
ET
3651 nstats->tx_fifo_errors = 0;
3652 nstats->tx_heartbeat_errors = 0;
3653 nstats->tx_window_errors = 0;
3654
3655 nstats->tx_errors = nstats->tx_aborted_errors +
de832a55
EG
3656 nstats->tx_carrier_errors +
3657 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3658}
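/*
 * bnx2x_hilo() (defined elsewhere in the driver) folds such a hi/lo
 * pair into the unsigned long fields of net_device_stats.  A sketch of
 * the idea, assuming, as in the stats structures above, that the hi
 * word immediately precedes the lo word:
 */
static inline unsigned long hilo_sketch(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	return ((u64)(*hiref) << 32) | lo;	/* full 64-bit counter */
#else
	return lo;			/* 32-bit arch: low word only */
#endif
}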
3659
3660static void bnx2x_drv_stats_update(struct bnx2x *bp)
3661{
3662 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3663 int i;
3664
3665 estats->driver_xoff = 0;
3666 estats->rx_err_discard_pkt = 0;
3667 estats->rx_skb_alloc_failed = 0;
3668 estats->hw_csum_err = 0;
54b9ddaa 3669 for_each_queue(bp, i) {
de832a55
EG
3670 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3671
3672 estats->driver_xoff += qstats->driver_xoff;
3673 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3674 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3675 estats->hw_csum_err += qstats->hw_csum_err;
3676 }
a2fbb9ea
ET
3677}
3678
bb2a0f7a 3679static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 3680{
bb2a0f7a 3681 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3682
bb2a0f7a
YG
3683 if (*stats_comp != DMAE_COMP_VAL)
3684 return;
3685
3686 if (bp->port.pmf)
de832a55 3687 bnx2x_hw_stats_update(bp);
a2fbb9ea 3688
de832a55
EG
3689 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
3690 BNX2X_ERR("storm stats were not updated 3 times in a row\n");
3691 bnx2x_panic();
3692 return;
a2fbb9ea
ET
3693 }
3694
de832a55
EG
3695 bnx2x_net_stats_update(bp);
3696 bnx2x_drv_stats_update(bp);
3697
7995c64e 3698 if (netif_msg_timer(bp)) {
bb2a0f7a 3699 struct bnx2x_eth_stats *estats = &bp->eth_stats;
34f80b04 3700 int i;
a2fbb9ea 3701
dea7aab1
VZ
3702 printk(KERN_DEBUG "%s: brb drops %u brb truncate %u\n",
3703 bp->dev->name,
de832a55 3704 estats->brb_drop_lo, estats->brb_truncate_lo);
a2fbb9ea
ET
3705
3706 for_each_queue(bp, i) {
dea7aab1
VZ
3707 struct bnx2x_fastpath *fp = &bp->fp[i];
3708 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3709
3710 printk(KERN_DEBUG "%s: rx usage(%4u) *rx_cons_sb(%u)"
3711 " rx pkt(%lu) rx calls(%lu %lu)\n",
3712 fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
3713 fp->rx_comp_cons),
3714 le16_to_cpu(*fp->rx_cons_sb),
3715 bnx2x_hilo(&qstats->
3716 total_unicast_packets_received_hi),
3717 fp->rx_calls, fp->rx_pkt);
3718 }
3719
3720 for_each_queue(bp, i) {
3721 struct bnx2x_fastpath *fp = &bp->fp[i];
3722 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3723 struct netdev_queue *txq =
3724 netdev_get_tx_queue(bp->dev, i);
3725
3726 printk(KERN_DEBUG "%s: tx avail(%4u) *tx_cons_sb(%u)"
3727 " tx pkt(%lu) tx calls (%lu)"
3728 " %s (Xoff events %u)\n",
3729 fp->name, bnx2x_tx_avail(fp),
3730 le16_to_cpu(*fp->tx_cons_sb),
3731 bnx2x_hilo(&qstats->
3732 total_unicast_packets_transmitted_hi),
3733 fp->tx_pkt,
3734 (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"),
3735 qstats->driver_xoff);
a2fbb9ea
ET
3736 }
3737 }
3738
bb2a0f7a
YG
3739 bnx2x_hw_stats_post(bp);
3740 bnx2x_storm_stats_post(bp);
3741}
a2fbb9ea 3742
bb2a0f7a
YG
3743static void bnx2x_port_stats_stop(struct bnx2x *bp)
3744{
3745 struct dmae_command *dmae;
3746 u32 opcode;
3747 int loader_idx = PMF_DMAE_C(bp);
3748 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3749
bb2a0f7a 3750 bp->executer_idx = 0;
a2fbb9ea 3751
bb2a0f7a
YG
3752 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3753 DMAE_CMD_C_ENABLE |
3754 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3755#ifdef __BIG_ENDIAN
bb2a0f7a 3756 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3757#else
bb2a0f7a 3758 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3759#endif
bb2a0f7a
YG
3760 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3761 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3762
3763 if (bp->port.port_stx) {
3764
3765 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3766 if (bp->func_stx)
3767 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3768 else
3769 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3770 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3771 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3772 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3773 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3774 dmae->len = sizeof(struct host_port_stats) >> 2;
3775 if (bp->func_stx) {
3776 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3777 dmae->comp_addr_hi = 0;
3778 dmae->comp_val = 1;
3779 } else {
3780 dmae->comp_addr_lo =
3781 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3782 dmae->comp_addr_hi =
3783 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3784 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3785
bb2a0f7a
YG
3786 *stats_comp = 0;
3787 }
a2fbb9ea
ET
3788 }
3789
bb2a0f7a
YG
3790 if (bp->func_stx) {
3791
3792 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3793 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3794 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3795 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3796 dmae->dst_addr_lo = bp->func_stx >> 2;
3797 dmae->dst_addr_hi = 0;
3798 dmae->len = sizeof(struct host_func_stats) >> 2;
3799 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3800 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3801 dmae->comp_val = DMAE_COMP_VAL;
3802
3803 *stats_comp = 0;
a2fbb9ea 3804 }
bb2a0f7a
YG
3805}
3806
3807static void bnx2x_stats_stop(struct bnx2x *bp)
3808{
3809 int update = 0;
3810
3811 bnx2x_stats_comp(bp);
3812
3813 if (bp->port.pmf)
3814 update = (bnx2x_hw_stats_update(bp) == 0);
3815
3816 update |= (bnx2x_storm_stats_update(bp) == 0);
3817
3818 if (update) {
3819 bnx2x_net_stats_update(bp);
a2fbb9ea 3820
bb2a0f7a
YG
3821 if (bp->port.pmf)
3822 bnx2x_port_stats_stop(bp);
3823
3824 bnx2x_hw_stats_post(bp);
3825 bnx2x_stats_comp(bp);
a2fbb9ea
ET
3826 }
3827}
3828
bb2a0f7a
YG
3829static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3830{
3831}
3832
3833static const struct {
3834 void (*action)(struct bnx2x *bp);
3835 enum bnx2x_stats_state next_state;
3836} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3837/* state event */
3838{
3839/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3840/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
3841/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3842/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3843},
3844{
3845/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
3846/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
3847/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
3848/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
3849}
3850};
3851
9f6c9258 3852void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
bb2a0f7a
YG
3853{
3854 enum bnx2x_stats_state state = bp->stats_state;
3855
cdaa7cb8
VZ
3856 if (unlikely(bp->panic))
3857 return;
3858
bb2a0f7a
YG
3859 bnx2x_stats_stm[state][event].action(bp);
3860 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3861
8924665a
EG
3862 /* Make sure the state has been "changed" */
3863 smp_wmb();
3864
7995c64e 3865 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
bb2a0f7a
YG
3866 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3867 state, event, bp->stats_state);
3868}
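/*
 * Callers drive the state table above solely through
 * bnx2x_stats_handle(); the same entry point serves every transition.
 * Illustrative call sites (the UPDATE case is the one issued from
 * bnx2x_timer() further below):
 *
 *	bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);	DISABLED -> ENABLED
 *	bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);	periodic refresh
 *	bnx2x_stats_handle(bp, STATS_EVENT_STOP);	ENABLED -> DISABLED
 */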
3869
6fe49bb9
EG
3870static void bnx2x_port_stats_base_init(struct bnx2x *bp)
3871{
3872 struct dmae_command *dmae;
3873 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3874
3875 /* sanity */
3876 if (!bp->port.pmf || !bp->port.port_stx) {
3877 BNX2X_ERR("BUG!\n");
3878 return;
3879 }
3880
3881 bp->executer_idx = 0;
3882
3883 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3884 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3885 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3886 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3887#ifdef __BIG_ENDIAN
3888 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3889#else
3890 DMAE_CMD_ENDIANITY_DW_SWAP |
3891#endif
3892 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3893 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3894 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3895 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3896 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3897 dmae->dst_addr_hi = 0;
3898 dmae->len = sizeof(struct host_port_stats) >> 2;
3899 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3900 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3901 dmae->comp_val = DMAE_COMP_VAL;
3902
3903 *stats_comp = 0;
3904 bnx2x_hw_stats_post(bp);
3905 bnx2x_stats_comp(bp);
3906}
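/*
 * Every statistics DMAE descriptor in this file follows the same
 * recipe; a hypothetical helper distilling the PCI -> GRC direction
 * (the driver fills these fields inline rather than via a helper).
 * GRC addresses and transfer lengths are expressed in 32-bit words,
 * hence the shifts by 2:
 */
static void dmae_fill_pci_to_grc_sketch(struct dmae_command *dmae,
					dma_addr_t src,
					u32 dst_grc_byte_off,
					size_t len_bytes)
{
	dmae->src_addr_lo = U64_LO(src);
	dmae->src_addr_hi = U64_HI(src);
	dmae->dst_addr_lo = dst_grc_byte_off >> 2;	/* word units */
	dmae->dst_addr_hi = 0;
	dmae->len = len_bytes >> 2;			/* word count */
}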
3907
3908static void bnx2x_func_stats_base_init(struct bnx2x *bp)
3909{
3910 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
3911 int port = BP_PORT(bp);
3912 int func;
3913 u32 func_stx;
3914
3915 /* sanity */
3916 if (!bp->port.pmf || !bp->func_stx) {
3917 BNX2X_ERR("BUG!\n");
3918 return;
3919 }
3920
3921 /* save our func_stx */
3922 func_stx = bp->func_stx;
3923
3924 for (vn = VN_0; vn < vn_max; vn++) {
3925 func = 2*vn + port;
3926
3927 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
3928 bnx2x_func_stats_init(bp);
3929 bnx2x_hw_stats_post(bp);
3930 bnx2x_stats_comp(bp);
3931 }
3932
3933 /* restore our func_stx */
3934 bp->func_stx = func_stx;
3935}
3936
3937static void bnx2x_func_stats_base_update(struct bnx2x *bp)
3938{
3939 struct dmae_command *dmae = &bp->stats_dmae;
3940 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3941
3942 /* sanity */
3943 if (!bp->func_stx) {
3944 BNX2X_ERR("BUG!\n");
3945 return;
3946 }
3947
3948 bp->executer_idx = 0;
3949 memset(dmae, 0, sizeof(struct dmae_command));
3950
3951 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3952 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3953 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3954#ifdef __BIG_ENDIAN
3955 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3956#else
3957 DMAE_CMD_ENDIANITY_DW_SWAP |
3958#endif
3959 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3960 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3961 dmae->src_addr_lo = bp->func_stx >> 2;
3962 dmae->src_addr_hi = 0;
3963 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
3964 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
3965 dmae->len = sizeof(struct host_func_stats) >> 2;
3966 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3967 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3968 dmae->comp_val = DMAE_COMP_VAL;
3969
3970 *stats_comp = 0;
3971 bnx2x_hw_stats_post(bp);
3972 bnx2x_stats_comp(bp);
3973}
3974
3975static void bnx2x_stats_init(struct bnx2x *bp)
3976{
3977 int port = BP_PORT(bp);
3978 int func = BP_FUNC(bp);
3979 int i;
3980
3981 bp->stats_pending = 0;
3982 bp->executer_idx = 0;
3983 bp->stats_counter = 0;
3984
3985 /* port and func stats for management */
3986 if (!BP_NOMCP(bp)) {
3987 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3988 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
3989
3990 } else {
3991 bp->port.port_stx = 0;
3992 bp->func_stx = 0;
3993 }
3994 DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
3995 bp->port.port_stx, bp->func_stx);
3996
3997 /* port stats */
3998 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3999 bp->port.old_nig_stats.brb_discard =
4000 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4001 bp->port.old_nig_stats.brb_truncate =
4002 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4003 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4004 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4005 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4006 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4007
4008 /* function stats */
4009 for_each_queue(bp, i) {
4010 struct bnx2x_fastpath *fp = &bp->fp[i];
4011
4012 memset(&fp->old_tclient, 0,
4013 sizeof(struct tstorm_per_client_stats));
4014 memset(&fp->old_uclient, 0,
4015 sizeof(struct ustorm_per_client_stats));
4016 memset(&fp->old_xclient, 0,
4017 sizeof(struct xstorm_per_client_stats));
4018 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4019 }
4020
4021 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4022 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4023
4024 bp->stats_state = STATS_STATE_DISABLED;
4025
4026 if (bp->port.pmf) {
4027 if (bp->port.port_stx)
4028 bnx2x_port_stats_base_init(bp);
4029
4030 if (bp->func_stx)
4031 bnx2x_func_stats_base_init(bp);
4032
4033 } else if (bp->func_stx)
4034 bnx2x_func_stats_base_update(bp);
4035}
4036
a2fbb9ea
ET
4037static void bnx2x_timer(unsigned long data)
4038{
4039 struct bnx2x *bp = (struct bnx2x *) data;
4040
4041 if (!netif_running(bp->dev))
4042 return;
4043
4044 if (atomic_read(&bp->intr_sem) != 0)
f1410647 4045 goto timer_restart;
a2fbb9ea
ET
4046
4047 if (poll) {
4048 struct bnx2x_fastpath *fp = &bp->fp[0];
4049 int rc;
4050
7961f791 4051 bnx2x_tx_int(fp);
a2fbb9ea
ET
4052 rc = bnx2x_rx_int(fp, 1000);
4053 }
4054
34f80b04
EG
4055 if (!BP_NOMCP(bp)) {
4056 int func = BP_FUNC(bp);
a2fbb9ea
ET
4057 u32 drv_pulse;
4058 u32 mcp_pulse;
4059
4060 ++bp->fw_drv_pulse_wr_seq;
4061 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4062 /* TBD - add SYSTEM_TIME */
4063 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 4064 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 4065
34f80b04 4066 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
a2fbb9ea
ET
4067 MCP_PULSE_SEQ_MASK);
4068 /* The delta between driver pulse and mcp response
4069 * should be 1 (before mcp response) or 0 (after mcp response)
4070 */
4071 if ((drv_pulse != mcp_pulse) &&
4072 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4073 /* someone lost a heartbeat... */
4074 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4075 drv_pulse, mcp_pulse);
4076 }
4077 }
4078
f34d28ea 4079 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a 4080 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 4081
f1410647 4082timer_restart:
a2fbb9ea
ET
4083 mod_timer(&bp->timer, jiffies + bp->current_interval);
4084}
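/*
 * Sketch of the heartbeat rule enforced above (hypothetical helper):
 * the driver's pulse sequence may lead the MCP's echoed sequence by at
 * most one tick, with all arithmetic modulo MCP_PULSE_SEQ_MASK + 1.
 */
static bool pulses_in_sync_sketch(u32 drv_pulse, u32 mcp_pulse)
{
	return (drv_pulse == mcp_pulse) ||
	       (drv_pulse == ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK));
}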
4085
4086/* end of Statistics */
4087
4088/* nic init */
4089
4090/*
4091 * nic init service functions
4092 */
4093
34f80b04 4094static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 4095{
34f80b04
EG
4096 int port = BP_PORT(bp);
4097
ca00392c
EG
4098 /* "CSTORM" */
4099 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4100 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4101 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4102 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4103 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4104 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
34f80b04
EG
4105}
4106
9f6c9258 4107void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
5c862848 4108 dma_addr_t mapping, int sb_id)
34f80b04
EG
4109{
4110 int port = BP_PORT(bp);
bb2a0f7a 4111 int func = BP_FUNC(bp);
a2fbb9ea 4112 int index;
34f80b04 4113 u64 section;
a2fbb9ea
ET
4114
4115 /* USTORM */
4116 section = ((u64)mapping) + offsetof(struct host_status_block,
4117 u_status_block);
34f80b04 4118 sb->u_status_block.status_block_id = sb_id;
a2fbb9ea 4119
ca00392c
EG
4120 REG_WR(bp, BAR_CSTRORM_INTMEM +
4121 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4122 REG_WR(bp, BAR_CSTRORM_INTMEM +
4123 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4124 U64_HI(section));
ca00392c
EG
4125 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4126 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4127
4128 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
ca00392c
EG
4129 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4130 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
a2fbb9ea
ET
4131
4132 /* CSTORM */
4133 section = ((u64)mapping) + offsetof(struct host_status_block,
4134 c_status_block);
34f80b04 4135 sb->c_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4136
4137 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4138 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4139 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4140 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4141 U64_HI(section));
7a9b2557 4142 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
ca00392c 4143 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4144
4145 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4146 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c 4147 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
34f80b04
EG
4148
4149 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4150}
4151
4152static void bnx2x_zero_def_sb(struct bnx2x *bp)
4153{
4154 int func = BP_FUNC(bp);
a2fbb9ea 4155
ca00392c 4156 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
490c3c9b
EG
4157 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4158 sizeof(struct tstorm_def_status_block)/4);
ca00392c
EG
4159 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4160 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4161 sizeof(struct cstorm_def_status_block_u)/4);
4162 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4163 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4164 sizeof(struct cstorm_def_status_block_c)/4);
4165 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
34f80b04
EG
4166 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4167 sizeof(struct xstorm_def_status_block)/4);
a2fbb9ea
ET
4168}
4169
4170static void bnx2x_init_def_sb(struct bnx2x *bp,
4171 struct host_def_status_block *def_sb,
34f80b04 4172 dma_addr_t mapping, int sb_id)
a2fbb9ea 4173{
34f80b04
EG
4174 int port = BP_PORT(bp);
4175 int func = BP_FUNC(bp);
a2fbb9ea
ET
4176 int index, val, reg_offset;
4177 u64 section;
4178
4179 /* ATTN */
4180 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4181 atten_status_block);
34f80b04 4182 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 4183
49d66772
ET
4184 bp->attn_state = 0;
4185
a2fbb9ea
ET
4186 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4187 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4188
34f80b04 4189 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
a2fbb9ea
ET
4190 bp->attn_group[index].sig[0] = REG_RD(bp,
4191 reg_offset + 0x10*index);
4192 bp->attn_group[index].sig[1] = REG_RD(bp,
4193 reg_offset + 0x4 + 0x10*index);
4194 bp->attn_group[index].sig[2] = REG_RD(bp,
4195 reg_offset + 0x8 + 0x10*index);
4196 bp->attn_group[index].sig[3] = REG_RD(bp,
4197 reg_offset + 0xc + 0x10*index);
4198 }
4199
a2fbb9ea
ET
4200 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4201 HC_REG_ATTN_MSG0_ADDR_L);
4202
4203 REG_WR(bp, reg_offset, U64_LO(section));
4204 REG_WR(bp, reg_offset + 4, U64_HI(section));
4205
4206 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4207
4208 val = REG_RD(bp, reg_offset);
34f80b04 4209 val |= sb_id;
a2fbb9ea
ET
4210 REG_WR(bp, reg_offset, val);
4211
4212 /* USTORM */
4213 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4214 u_def_status_block);
34f80b04 4215 def_sb->u_def_status_block.status_block_id = sb_id;
a2fbb9ea 4216
ca00392c
EG
4217 REG_WR(bp, BAR_CSTRORM_INTMEM +
4218 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4219 REG_WR(bp, BAR_CSTRORM_INTMEM +
4220 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
a2fbb9ea 4221 U64_HI(section));
ca00392c
EG
4222 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4223 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
a2fbb9ea
ET
4224
4225 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
ca00392c
EG
4226 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4227 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
a2fbb9ea
ET
4228
4229 /* CSTORM */
4230 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4231 c_def_status_block);
34f80b04 4232 def_sb->c_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4233
4234 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4235 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
a2fbb9ea 4236 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4237 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
a2fbb9ea 4238 U64_HI(section));
5c862848 4239 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
ca00392c 4240 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
a2fbb9ea
ET
4241
4242 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4243 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c 4244 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
a2fbb9ea
ET
4245
4246 /* TSTORM */
4247 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4248 t_def_status_block);
34f80b04 4249 def_sb->t_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4250
4251 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4252 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4253 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4254 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4255 U64_HI(section));
5c862848 4256 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
34f80b04 4257 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4258
4259 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4260 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 4261 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4262
4263 /* XSTORM */
4264 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4265 x_def_status_block);
34f80b04 4266 def_sb->x_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4267
4268 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4269 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4270 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4271 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4272 U64_HI(section));
5c862848 4273 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
34f80b04 4274 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4275
4276 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4277 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 4278 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 4279
bb2a0f7a 4280 bp->stats_pending = 0;
66e855f3 4281 bp->set_mac_pending = 0;
bb2a0f7a 4282
34f80b04 4283 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
4284}
4285
9f6c9258 4286void bnx2x_update_coalesce(struct bnx2x *bp)
a2fbb9ea 4287{
34f80b04 4288 int port = BP_PORT(bp);
a2fbb9ea
ET
4289 int i;
4290
4291 for_each_queue(bp, i) {
34f80b04 4292 int sb_id = bp->fp[i].sb_id;
a2fbb9ea
ET
4293
4294 /* HC_INDEX_U_ETH_RX_CQ_CONS */
ca00392c
EG
4295 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4296 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4297 U_SB_ETH_RX_CQ_INDEX),
7d323bfd 4298 bp->rx_ticks/(4 * BNX2X_BTR));
ca00392c
EG
4299 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4300 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4301 U_SB_ETH_RX_CQ_INDEX),
7d323bfd 4302 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
a2fbb9ea
ET
4303
4304 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4305 REG_WR8(bp, BAR_CSTRORM_INTMEM +
ca00392c
EG
4306 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4307 C_SB_ETH_TX_CQ_INDEX),
7d323bfd 4308 bp->tx_ticks/(4 * BNX2X_BTR));
a2fbb9ea 4309 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c
EG
4310 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4311 C_SB_ETH_TX_CQ_INDEX),
7d323bfd 4312 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
a2fbb9ea
ET
4313 }
4314}
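/*
 * The arithmetic above assumes bp->rx_ticks/bp->tx_ticks are in
 * microseconds (as set via ethtool coalescing) and that the HC timeout
 * registers count in units of 4 * BNX2X_BTR microseconds.  A resulting
 * value of 0 cannot be coalesced, so the companion HC_DISABLE register
 * is set and that status-block index then interrupts immediately:
 *
 *	u8  hc_timeout = bp->rx_ticks / (4 * BNX2X_BTR);
 *	u16 hc_disable = hc_timeout ? 0 : 1;
 */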
4315
a2fbb9ea
ET
4316static void bnx2x_init_sp_ring(struct bnx2x *bp)
4317{
34f80b04 4318 int func = BP_FUNC(bp);
a2fbb9ea
ET
4319
4320 spin_lock_init(&bp->spq_lock);
4321
4322 bp->spq_left = MAX_SPQ_PENDING;
4323 bp->spq_prod_idx = 0;
a2fbb9ea
ET
4324 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4325 bp->spq_prod_bd = bp->spq;
4326 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4327
34f80b04 4328 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 4329 U64_LO(bp->spq_mapping));
34f80b04
EG
4330 REG_WR(bp,
4331 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
a2fbb9ea
ET
4332 U64_HI(bp->spq_mapping));
4333
34f80b04 4334 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
4335 bp->spq_prod_idx);
4336}
4337
4338static void bnx2x_init_context(struct bnx2x *bp)
4339{
4340 int i;
4341
54b9ddaa
VZ
4342 /* Rx */
4343 for_each_queue(bp, i) {
a2fbb9ea
ET
4344 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4345 struct bnx2x_fastpath *fp = &bp->fp[i];
de832a55 4346 u8 cl_id = fp->cl_id;
a2fbb9ea 4347
34f80b04
EG
4348 context->ustorm_st_context.common.sb_index_numbers =
4349 BNX2X_RX_SB_INDEX_NUM;
0626b899 4350 context->ustorm_st_context.common.clientId = cl_id;
ca00392c 4351 context->ustorm_st_context.common.status_block_id = fp->sb_id;
34f80b04 4352 context->ustorm_st_context.common.flags =
de832a55
EG
4353 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4354 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4355 context->ustorm_st_context.common.statistics_counter_id =
4356 cl_id;
8d9c5f34 4357 context->ustorm_st_context.common.mc_alignment_log_size =
0f00846d 4358 BNX2X_RX_ALIGN_SHIFT;
34f80b04 4359 context->ustorm_st_context.common.bd_buff_size =
437cf2f1 4360 bp->rx_buf_size;
34f80b04 4361 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 4362 U64_HI(fp->rx_desc_mapping);
34f80b04 4363 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 4364 U64_LO(fp->rx_desc_mapping);
7a9b2557
VZ
4365 if (!fp->disable_tpa) {
4366 context->ustorm_st_context.common.flags |=
ca00392c 4367 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
7a9b2557 4368 context->ustorm_st_context.common.sge_buff_size =
cdaa7cb8
VZ
4369 (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
4370 0xffff);
7a9b2557
VZ
4371 context->ustorm_st_context.common.sge_page_base_hi =
4372 U64_HI(fp->rx_sge_mapping);
4373 context->ustorm_st_context.common.sge_page_base_lo =
4374 U64_LO(fp->rx_sge_mapping);
ca00392c
EG
4375
4376 context->ustorm_st_context.common.max_sges_for_packet =
4377 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
4378 context->ustorm_st_context.common.max_sges_for_packet =
4379 ((context->ustorm_st_context.common.
4380 max_sges_for_packet + PAGES_PER_SGE - 1) &
4381 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
7a9b2557
VZ
4382 }
4383
8d9c5f34
EG
4384 context->ustorm_ag_context.cdu_usage =
4385 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4386 CDU_REGION_NUMBER_UCM_AG,
4387 ETH_CONNECTION_TYPE);
4388
ca00392c
EG
4389 context->xstorm_ag_context.cdu_reserved =
4390 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4391 CDU_REGION_NUMBER_XCM_AG,
4392 ETH_CONNECTION_TYPE);
4393 }
4394
54b9ddaa
VZ
4395 /* Tx */
4396 for_each_queue(bp, i) {
ca00392c
EG
4397 struct bnx2x_fastpath *fp = &bp->fp[i];
4398 struct eth_context *context =
54b9ddaa 4399 bnx2x_sp(bp, context[i].eth);
ca00392c
EG
4400
4401 context->cstorm_st_context.sb_index_number =
4402 C_SB_ETH_TX_CQ_INDEX;
4403 context->cstorm_st_context.status_block_id = fp->sb_id;
4404
8d9c5f34
EG
4405 context->xstorm_st_context.tx_bd_page_base_hi =
4406 U64_HI(fp->tx_desc_mapping);
4407 context->xstorm_st_context.tx_bd_page_base_lo =
4408 U64_LO(fp->tx_desc_mapping);
ca00392c 4409 context->xstorm_st_context.statistics_data = (fp->cl_id |
8d9c5f34 4410 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
a2fbb9ea
ET
4411 }
4412}
4413
4414static void bnx2x_init_ind_table(struct bnx2x *bp)
4415{
26c8fa4d 4416 int func = BP_FUNC(bp);
a2fbb9ea
ET
4417 int i;
4418
555f6c78 4419 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
a2fbb9ea
ET
4420 return;
4421
555f6c78
EG
4422 DP(NETIF_MSG_IFUP,
4423 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
a2fbb9ea 4424 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04 4425 REG_WR8(bp, BAR_TSTRORM_INTMEM +
26c8fa4d 4426 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
54b9ddaa 4427 bp->fp->cl_id + (i % bp->num_queues));
a2fbb9ea
ET
4428}
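/*
 * Sketch: the indirection table round-robins RSS hash slots across the
 * active queues, offset by the leading client id.  With, say, four
 * queues and a client id base of 0 the programmed table reads
 * 0,1,2,3,0,1,2,3,...  (hypothetical helper, equivalent to the loop
 * body above):
 */
static u8 rss_slot_to_client_sketch(struct bnx2x *bp, int slot)
{
	return bp->fp->cl_id + (slot % bp->num_queues);
}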
4429
9f6c9258 4430void bnx2x_set_client_config(struct bnx2x *bp)
49d66772 4431{
49d66772 4432 struct tstorm_eth_client_config tstorm_client = {0};
34f80b04
EG
4433 int port = BP_PORT(bp);
4434 int i;
49d66772 4435
e7799c5f 4436 tstorm_client.mtu = bp->dev->mtu;
49d66772 4437 tstorm_client.config_flags =
de832a55
EG
4438 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4439 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
49d66772 4440#ifdef BCM_VLAN
0c6671b0 4441 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
49d66772 4442 tstorm_client.config_flags |=
8d9c5f34 4443 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
49d66772
ET
4444 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4445 }
4446#endif
49d66772
ET
4447
4448 for_each_queue(bp, i) {
de832a55
EG
4449 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4450
49d66772 4451 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4452 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
49d66772
ET
4453 ((u32 *)&tstorm_client)[0]);
4454 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4455 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
49d66772
ET
4456 ((u32 *)&tstorm_client)[1]);
4457 }
4458
34f80b04
EG
4459 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4460 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
49d66772
ET
4461}
4462
9f6c9258 4463void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
a2fbb9ea 4464{
a2fbb9ea 4465 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
34f80b04 4466 int mode = bp->rx_mode;
37b091ba 4467 int mask = bp->rx_mode_cl_mask;
34f80b04 4468 int func = BP_FUNC(bp);
581ce43d 4469 int port = BP_PORT(bp);
a2fbb9ea 4470 int i;
581ce43d
EG
4471 /* All but management unicast packets should pass to the host as well */
4472 u32 llh_mask =
4473 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
4474 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
4475 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
4476 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
a2fbb9ea 4477
3196a88a 4478 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
a2fbb9ea
ET
4479
4480 switch (mode) {
4481 case BNX2X_RX_MODE_NONE: /* no Rx */
34f80b04
EG
4482 tstorm_mac_filter.ucast_drop_all = mask;
4483 tstorm_mac_filter.mcast_drop_all = mask;
4484 tstorm_mac_filter.bcast_drop_all = mask;
a2fbb9ea 4485 break;
356e2385 4486
a2fbb9ea 4487 case BNX2X_RX_MODE_NORMAL:
34f80b04 4488 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 4489 break;
356e2385 4490
a2fbb9ea 4491 case BNX2X_RX_MODE_ALLMULTI:
34f80b04
EG
4492 tstorm_mac_filter.mcast_accept_all = mask;
4493 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 4494 break;
356e2385 4495
a2fbb9ea 4496 case BNX2X_RX_MODE_PROMISC:
34f80b04
EG
4497 tstorm_mac_filter.ucast_accept_all = mask;
4498 tstorm_mac_filter.mcast_accept_all = mask;
4499 tstorm_mac_filter.bcast_accept_all = mask;
581ce43d
EG
4500 /* pass management unicast packets as well */
4501 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
a2fbb9ea 4502 break;
356e2385 4503
a2fbb9ea 4504 default:
34f80b04
EG
4505 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4506 break;
a2fbb9ea
ET
4507 }
4508
581ce43d
EG
4509 REG_WR(bp,
4510 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
4511 llh_mask);
4512
a2fbb9ea
ET
4513 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4514 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4515 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
a2fbb9ea
ET
4516 ((u32 *)&tstorm_mac_filter)[i]);
4517
34f80b04 4518/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
a2fbb9ea
ET
4519 ((u32 *)&tstorm_mac_filter)[i]); */
4520 }
a2fbb9ea 4521
49d66772
ET
4522 if (mode != BNX2X_RX_MODE_NONE)
4523 bnx2x_set_client_config(bp);
a2fbb9ea
ET
4524}
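/*
 * Summary of the mode -> filter mapping programmed above ('mask'
 * selects which client(s) each rule applies to):
 *
 *	NONE      drop all ucast/mcast/bcast (no Rx at all)
 *	NORMAL    accept all bcast; ucast/mcast filtered by the MAC CAM
 *	ALLMULTI  accept all mcast and all bcast
 *	PROMISC   accept everything, incl. management ucast via the LLH
 */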
4525
471de716
EG
4526static void bnx2x_init_internal_common(struct bnx2x *bp)
4527{
4528 int i;
4529
4530 /* Zero this manually as its initialization is
4531 currently missing in the initTool */
4532 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4533 REG_WR(bp, BAR_USTRORM_INTMEM +
4534 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4535}
4536
4537static void bnx2x_init_internal_port(struct bnx2x *bp)
4538{
4539 int port = BP_PORT(bp);
4540
ca00392c
EG
4541 REG_WR(bp,
4542 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
4543 REG_WR(bp,
4544 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
471de716
EG
4545 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4546 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4547}
4548
4549static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 4550{
a2fbb9ea
ET
4551 struct tstorm_eth_function_common_config tstorm_config = {0};
4552 struct stats_indication_flags stats_flags = {0};
34f80b04
EG
4553 int port = BP_PORT(bp);
4554 int func = BP_FUNC(bp);
de832a55
EG
4555 int i, j;
4556 u32 offset;
471de716 4557 u16 max_agg_size;
a2fbb9ea 4558
c68ed255
TH
4559 tstorm_config.config_flags = RSS_FLAGS(bp);
4560
4561 if (is_multi(bp))
a2fbb9ea 4562 tstorm_config.rss_result_mask = MULTI_MASK;
ca00392c
EG
4563
4564 /* Enable TPA if needed */
4565 if (bp->flags & TPA_ENABLE_FLAG)
4566 tstorm_config.config_flags |=
4567 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
4568
8d9c5f34
EG
4569 if (IS_E1HMF(bp))
4570 tstorm_config.config_flags |=
4571 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
a2fbb9ea 4572
34f80b04
EG
4573 tstorm_config.leading_client_id = BP_L_ID(bp);
4574
a2fbb9ea 4575 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4576 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
a2fbb9ea
ET
4577 (*(u32 *)&tstorm_config));
4578
c14423fe 4579 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
37b091ba 4580 bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
a2fbb9ea
ET
4581 bnx2x_set_storm_rx_mode(bp);
4582
de832a55
EG
4583 for_each_queue(bp, i) {
4584 u8 cl_id = bp->fp[i].cl_id;
4585
4586 /* reset xstorm per client statistics */
4587 offset = BAR_XSTRORM_INTMEM +
4588 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4589 for (j = 0;
4590 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4591 REG_WR(bp, offset + j*4, 0);
4592
4593 /* reset tstorm per client statistics */
4594 offset = BAR_TSTRORM_INTMEM +
4595 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4596 for (j = 0;
4597 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4598 REG_WR(bp, offset + j*4, 0);
4599
4600 /* reset ustorm per client statistics */
4601 offset = BAR_USTRORM_INTMEM +
4602 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4603 for (j = 0;
4604 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4605 REG_WR(bp, offset + j*4, 0);
66e855f3
YG
4606 }
4607
4608 /* Init statistics related context */
34f80b04 4609 stats_flags.collect_eth = 1;
a2fbb9ea 4610
66e855f3 4611 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4612 ((u32 *)&stats_flags)[0]);
66e855f3 4613 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4614 ((u32 *)&stats_flags)[1]);
4615
66e855f3 4616 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4617 ((u32 *)&stats_flags)[0]);
66e855f3 4618 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4619 ((u32 *)&stats_flags)[1]);
4620
de832a55
EG
4621 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4622 ((u32 *)&stats_flags)[0]);
4623 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4624 ((u32 *)&stats_flags)[1]);
4625
66e855f3 4626 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4627 ((u32 *)&stats_flags)[0]);
66e855f3 4628 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4629 ((u32 *)&stats_flags)[1]);
4630
66e855f3
YG
4631 REG_WR(bp, BAR_XSTRORM_INTMEM +
4632 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4633 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4634 REG_WR(bp, BAR_XSTRORM_INTMEM +
4635 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4636 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4637
4638 REG_WR(bp, BAR_TSTRORM_INTMEM +
4639 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4640 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4641 REG_WR(bp, BAR_TSTRORM_INTMEM +
4642 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4643 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04 4644
de832a55
EG
4645 REG_WR(bp, BAR_USTRORM_INTMEM +
4646 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4647 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4648 REG_WR(bp, BAR_USTRORM_INTMEM +
4649 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4650 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4651
34f80b04
EG
4652 if (CHIP_IS_E1H(bp)) {
4653 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4654 IS_E1HMF(bp));
4655 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4656 IS_E1HMF(bp));
4657 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4658 IS_E1HMF(bp));
4659 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4660 IS_E1HMF(bp));
4661
7a9b2557
VZ
4662 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4663 bp->e1hov);
34f80b04
EG
4664 }
4665
4f40f2cb 4666 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
cdaa7cb8
VZ
4667 max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
4668 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
54b9ddaa 4669 for_each_queue(bp, i) {
7a9b2557 4670 struct bnx2x_fastpath *fp = &bp->fp[i];
7a9b2557
VZ
4671
4672 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 4673 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
4674 U64_LO(fp->rx_comp_mapping));
4675 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 4676 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
7a9b2557
VZ
4677 U64_HI(fp->rx_comp_mapping));
4678
ca00392c
EG
4679 /* Next page */
4680 REG_WR(bp, BAR_USTRORM_INTMEM +
4681 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
4682 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
4683 REG_WR(bp, BAR_USTRORM_INTMEM +
4684 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
4685 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
4686
7a9b2557 4687 REG_WR16(bp, BAR_USTRORM_INTMEM +
0626b899 4688 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
4689 max_agg_size);
4690 }
8a1c38d1 4691
1c06328c
EG
4692 /* dropless flow control */
4693 if (CHIP_IS_E1H(bp)) {
4694 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
4695
4696 rx_pause.bd_thr_low = 250;
4697 rx_pause.cqe_thr_low = 250;
4698 rx_pause.cos = 1;
4699 rx_pause.sge_thr_low = 0;
4700 rx_pause.bd_thr_high = 350;
4701 rx_pause.cqe_thr_high = 350;
4702 rx_pause.sge_thr_high = 0;
4703
54b9ddaa 4704 for_each_queue(bp, i) {
1c06328c
EG
4705 struct bnx2x_fastpath *fp = &bp->fp[i];
4706
4707 if (!fp->disable_tpa) {
4708 rx_pause.sge_thr_low = 150;
4709 rx_pause.sge_thr_high = 250;
4710 }
4711
4712
4713 offset = BAR_USTRORM_INTMEM +
4714 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
4715 fp->cl_id);
4716 for (j = 0;
4717 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
4718 j++)
4719 REG_WR(bp, offset + j*4,
4720 ((u32 *)&rx_pause)[j]);
4721 }
4722 }
4723
8a1c38d1
EG
4724 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
4725
4726 /* Init rate shaping and fairness contexts */
4727 if (IS_E1HMF(bp)) {
4728 int vn;
4729
4730 /* During init there is no active link;
4731 until link is up, set the link rate to 10Gbps */
4732 bp->link_vars.line_speed = SPEED_10000;
4733 bnx2x_init_port_minmax(bp);
4734
b015e3d1
EG
4735 if (!BP_NOMCP(bp))
4736 bp->mf_config =
4737 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8a1c38d1
EG
4738 bnx2x_calc_vn_weight_sum(bp);
4739
4740 for (vn = VN_0; vn < E1HVN_MAX; vn++)
4741 bnx2x_init_vn_minmax(bp, 2*vn + port);
4742
4743 /* Enable rate shaping and fairness */
b015e3d1 4744 bp->cmng.flags.cmng_enables |=
8a1c38d1 4745 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
b015e3d1 4746
8a1c38d1
EG
4747 } else {
4748 /* rate shaping and fairness are disabled */
4749 DP(NETIF_MSG_IFUP,
4750 "single function mode minmax will be disabled\n");
4751 }
4752
4753
cdaa7cb8 4754 /* Store cmng structures to internal memory */
8a1c38d1
EG
4755 if (bp->port.pmf)
4756 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
4757 REG_WR(bp, BAR_XSTRORM_INTMEM +
4758 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
4759 ((u32 *)(&bp->cmng))[i]);
a2fbb9ea
ET
4760}
4761
471de716
EG
4762static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4763{
4764 switch (load_code) {
4765 case FW_MSG_CODE_DRV_LOAD_COMMON:
4766 bnx2x_init_internal_common(bp);
4767 /* no break */
4768
4769 case FW_MSG_CODE_DRV_LOAD_PORT:
4770 bnx2x_init_internal_port(bp);
4771 /* no break */
4772
4773 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4774 bnx2x_init_internal_func(bp);
4775 break;
4776
4777 default:
4778 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4779 break;
4780 }
4781}
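/*
 * The cascade above relies on deliberate switch fall-through: a COMMON
 * load also performs the port and function init, and a PORT load also
 * performs the function init.  A switch-free sketch of the same logic,
 * valid for the three known load codes:
 */
if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON)
	bnx2x_init_internal_common(bp);
if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON ||
    load_code == FW_MSG_CODE_DRV_LOAD_PORT)
	bnx2x_init_internal_port(bp);
bnx2x_init_internal_func(bp);		/* all three codes reach here */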
4782
9f6c9258 4783void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
4784{
4785 int i;
4786
4787 for_each_queue(bp, i) {
4788 struct bnx2x_fastpath *fp = &bp->fp[i];
4789
34f80b04 4790 fp->bp = bp;
a2fbb9ea 4791 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 4792 fp->index = i;
34f80b04 4793 fp->cl_id = BP_L_ID(bp) + i;
37b091ba
MC
4794#ifdef BCM_CNIC
4795 fp->sb_id = fp->cl_id + 1;
4796#else
34f80b04 4797 fp->sb_id = fp->cl_id;
37b091ba 4798#endif
34f80b04 4799 DP(NETIF_MSG_IFUP,
f5372251
EG
4800 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
4801 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5c862848 4802 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
0626b899 4803 fp->sb_id);
5c862848 4804 bnx2x_update_fpsb_idx(fp);
a2fbb9ea
ET
4805 }
4806
16119785
EG
4807 /* ensure status block indices were read */
4808 rmb();
4809
4810
5c862848
EG
4811 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4812 DEF_SB_ID);
4813 bnx2x_update_dsb_idx(bp);
a2fbb9ea
ET
4814 bnx2x_update_coalesce(bp);
4815 bnx2x_init_rx_rings(bp);
4816 bnx2x_init_tx_ring(bp);
4817 bnx2x_init_sp_ring(bp);
4818 bnx2x_init_context(bp);
471de716 4819 bnx2x_init_internal(bp, load_code);
a2fbb9ea 4820 bnx2x_init_ind_table(bp);
0ef00459
EG
4821 bnx2x_stats_init(bp);
4822
4823 /* At this point, we are ready for interrupts */
4824 atomic_set(&bp->intr_sem, 0);
4825
4826 /* flush all before enabling interrupts */
4827 mb();
4828 mmiowb();
4829
615f8fd9 4830 bnx2x_int_enable(bp);
eb8da205
EG
4831
4832 /* Check for SPIO5 */
4833 bnx2x_attn_int_deasserted0(bp,
4834 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
4835 AEU_INPUTS_ATTN_BITS_SPIO5);
a2fbb9ea
ET
4836}
4837
4838/* end of nic init */
4839
4840/*
4841 * gzip service functions
4842 */
4843
4844static int bnx2x_gunzip_init(struct bnx2x *bp)
4845{
1a983142
FT
4846 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
4847 &bp->gunzip_mapping, GFP_KERNEL);
a2fbb9ea
ET
4848 if (bp->gunzip_buf == NULL)
4849 goto gunzip_nomem1;
4850
4851 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4852 if (bp->strm == NULL)
4853 goto gunzip_nomem2;
4854
4855 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4856 GFP_KERNEL);
4857 if (bp->strm->workspace == NULL)
4858 goto gunzip_nomem3;
4859
4860 return 0;
4861
4862gunzip_nomem3:
4863 kfree(bp->strm);
4864 bp->strm = NULL;
4865
4866gunzip_nomem2:
1a983142
FT
4867 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4868 bp->gunzip_mapping);
a2fbb9ea
ET
4869 bp->gunzip_buf = NULL;
4870
4871gunzip_nomem1:
cdaa7cb8
VZ
4872 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
4873 " decompression\n");
a2fbb9ea
ET
4874 return -ENOMEM;
4875}
4876
4877static void bnx2x_gunzip_end(struct bnx2x *bp)
4878{
4879 kfree(bp->strm->workspace);
4880
4881 kfree(bp->strm);
4882 bp->strm = NULL;
4883
4884 if (bp->gunzip_buf) {
1a983142
FT
4885 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4886 bp->gunzip_mapping);
a2fbb9ea
ET
4887 bp->gunzip_buf = NULL;
4888 }
4889}
4890
94a78b79 4891static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
a2fbb9ea
ET
4892{
4893 int n, rc;
4894
4895 /* check gzip header */
94a78b79
VZ
4896 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
4897 BNX2X_ERR("Bad gzip header\n");
a2fbb9ea 4898 return -EINVAL;
94a78b79 4899 }
a2fbb9ea
ET
4900
4901 n = 10;
4902
34f80b04 4903#define FNAME 0x8
a2fbb9ea
ET
4904
4905 if (zbuf[3] & FNAME)
4906 while ((zbuf[n++] != 0) && (n < len));
4907
94a78b79 4908 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
a2fbb9ea
ET
4909 bp->strm->avail_in = len - n;
4910 bp->strm->next_out = bp->gunzip_buf;
4911 bp->strm->avail_out = FW_BUF_SIZE;
4912
4913 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4914 if (rc != Z_OK)
4915 return rc;
4916
4917 rc = zlib_inflate(bp->strm, Z_FINISH);
4918 if ((rc != Z_OK) && (rc != Z_STREAM_END))
7995c64e
JP
4919 netdev_err(bp->dev, "Firmware decompression error: %s\n",
4920 bp->strm->msg);
a2fbb9ea
ET
4921
4922 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4923 if (bp->gunzip_outlen & 0x3)
cdaa7cb8
VZ
4924 netdev_err(bp->dev, "Firmware decompression error:"
4925 " gunzip_outlen (%d) not aligned\n",
4926 bp->gunzip_outlen);
a2fbb9ea
ET
4927 bp->gunzip_outlen >>= 2;
4928
4929 zlib_inflateEnd(bp->strm);
4930
4931 if (rc == Z_STREAM_END)
4932 return 0;
4933
4934 return rc;
4935}
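/*
 * Sketch of the header handling above (hypothetical helper): the
 * firmware image is a gzip file, so the 10-byte fixed header is
 * skipped, plus the NUL-terminated original-file-name field when the
 * FNAME flag (bit 3 of the flags byte) is set.  What remains is a raw
 * deflate stream, which is why zlib_inflateInit2() is called with a
 * negative window size (-MAX_WBITS means "no zlib/gzip wrapper").
 */
static int gzip_payload_offset_sketch(const u8 *zbuf, int len)
{
	int n = 10;			/* fixed gzip header */

	if (zbuf[3] & FNAME)		/* skip the stored file name */
		while (n < len && zbuf[n++] != 0)
			;
	return n;
}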
4936
/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

/* some of the internal memories
 * are not directly readable from the driver;
 * to test them we send debug packets
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO: do we need to reset the NIG statistics first? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}

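/*
 * Sketch of the polling idiom used twice above (hypothetical helper, not
 * called by the driver): poll a register every 10ms up to 'count' times and
 * return the last value read, leaving the pass/fail decision to the caller.
 */
static __maybe_unused u32 bnx2x_poll_reg_example(struct bnx2x *bp, u32 reg,
						 u32 expected, int count)
{
	u32 val = 0;

	while (count--) {
		val = REG_RD(bp, reg);
		if (val == expected)
			break;
		msleep(10);
	}
	return val;	/* caller compares against 'expected' */
}
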
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bit 3,4 masked */
}

static const struct {
	u32 addr;
	u32 mask;
} bnx2x_parity_mask[] = {
	{PXP_REG_PXP_PRTY_MASK, 0xffffffff},
	{PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
	{PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
	{HC_REG_HC_PRTY_MASK, 0xffffffff},
	{MISC_REG_MISC_PRTY_MASK, 0xffffffff},
	{QM_REG_QM_PRTY_MASK, 0x0},
	{DORQ_REG_DORQ_PRTY_MASK, 0x0},
	{GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
	{GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
	{SRC_REG_SRC_PRTY_MASK, 0x4},	/* bit 2 */
	{CDU_REG_CDU_PRTY_MASK, 0x0},
	{CFC_REG_CFC_PRTY_MASK, 0x0},
	{DBG_REG_DBG_PRTY_MASK, 0x0},
	{DMAE_REG_DMAE_PRTY_MASK, 0x0},
	{BRB1_REG_BRB1_PRTY_MASK, 0x0},
	{PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
	{TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
	{CSDM_REG_CSDM_PRTY_MASK, 0x8},	/* bit 3 */
	{USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
	{XSDM_REG_XSDM_PRTY_MASK, 0x8},	/* bit 3 */
	{TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
	{TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
	{USEM_REG_USEM_PRTY_MASK_0, 0x0},
	{USEM_REG_USEM_PRTY_MASK_1, 0x0},
	{CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
	{CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
	{XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
	{XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
};

static void enable_blocks_parity(struct bnx2x *bp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
		REG_WR(bp, bnx2x_parity_mask[i].addr,
		       bnx2x_parity_mask[i].mask);
}


static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}

static void bnx2x_init_pxp(struct bnx2x *bp)
{
	u16 devctl;
	int r_order, w_order;

	pci_read_config_word(bp->pdev,
			     bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
	w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
	if (bp->mrrs == -1)
		r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
	else {
		DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
		r_order = bp->mrrs;
	}

	bnx2x_init_pxp_arb(bp, r_order, w_order);
}

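/*
 * Background note (illustrative helper, not used by the driver): the DEVCTL
 * shifts above extract Max_Payload_Size (bits 7:5) and Max_Read_Request_Size
 * (bits 14:12), which PCIe encodes as log2(size/128). Decoding an "order"
 * back to bytes is therefore a single shift.
 */
static __maybe_unused unsigned int bnx2x_pcie_order_to_bytes(int order)
{
	return 128U << order;	/* 0 -> 128B, 1 -> 256B, ... 5 -> 4096B */
}
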
static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	int is_required;
	u32 val;
	int port;

	if (BP_NOMCP(bp))
		return;

	is_required = 0;
	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			u32 phy_type =
				SHMEM_RD(bp, dev_info.port_hw_config[port].
					 external_phy_config) &
				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			is_required |=
				((phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}

static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;
#ifdef BCM_CNIC
	u32 wb_write[2];
#endif

	DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_CNIC
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);

#ifdef BCM_CNIC
	wb_write[0] = 0;
	wb_write[1] = 0;
	for (i = 0; i < 64; i++) {
		REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
		bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);

		if (CHIP_IS_E1H(bp)) {
			REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
			bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
					  wb_write, 2);
		}
	}
#endif
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));

	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
		REG_WR(bp, i, random32());
	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
#ifdef BCM_CNIC
	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
#endif
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		dev_alert(&bp->pdev->dev, "please adjust the size "
					  "of cdu_context(%ld)\n",
			  (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);

	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		bp->port.need_hw_lock = 1;
		break;

	default:
		break;
	}

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);
	if (CHIP_PARITY_SUPPORTED(bp))
		enable_blocks_parity(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}

static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init  port %d\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

#ifdef BCM_CNIC
	REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);

	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
		/* no pause for emulation and FPGA */
		low = 0;
		high = 513;
	} else {
		if (IS_E1HMF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
	}
	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);

	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
	{
		u32 swap_val, swap_override, aeu_gpio_mask, offset;

		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
			       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);

		/* The GPIO should be swapped if the swap register is
		   set and active */
		swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
		swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);

		/* Select function upon port-swap configuration */
		if (port == 0) {
			offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
		} else {
			offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
		}
		val = REG_RD(bp, offset);
		/* add GPIO3 to group */
		val |= aeu_gpio_mask;
		REG_WR(bp, offset, val);
	}
		break;

	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		/* add SPIO 5 to group 0 */
	{
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
	}
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}

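/*
 * Worked example (illustrative): in a non-MF setup on a two-port board with
 * mtu 9000, the BRB branch above computes low = 96 + 9000/64 + 1 = 237
 * (i.e. ceil((24*1024 + 9000*4)/256) blocks of 256 bytes) and
 * high = 237 + 56 = 293.
 */
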
#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* The phys address is shifted right 12 bits and a 1=valid bit is
 * added at bit 52 (the 53rd bit); since this is a wide register(TM)
 * we split it into two 32 bit writes.
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)

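/*
 * Worked example (illustrative): for a DMA address of 0x123456789000,
 *	ONCHIP_ADDR1() -> (0x123456789000 >> 12) & 0xffffffff = 0x23456789
 *	ONCHIP_ADDR2() -> (1 << 20) | (0x123456789000 >> 44)  = 0x00100001
 * so the low write carries address bits 43:12, and the high write carries
 * bits 63:44 plus the valid bit.
 */
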
#ifdef BCM_CNIC
#define CNIC_ILT_LINES		127
#define CNIC_CTX_PER_ILT	16
#else
#define CNIC_ILT_LINES		0
#endif

static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else /* E1 */
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init  func %d\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

#ifdef BCM_CNIC
	i += 1 + CNIC_ILT_LINES;
	bnx2x_ilt_wr(bp, i, bp->timers_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->qm_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->t1_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
	}

	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);

	bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
		    U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));

	bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
		    U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
		    U64_HI((u64)bp->t2_mapping + 16*1024 - 64));

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
#endif

	if (CHIP_IS_E1H(bp)) {
		bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}

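/*
 * Layout note (illustrative): with BCM_CNIC, each function's window of
 * ILT_PER_FUNC (384) lines is consumed above as 1 + CNIC_ILT_LINES (128)
 * lines of CDU context, then one line each for the timers, QM and searcher
 * T1 tables; the searcher T2 table is handed to the hardware by address
 * instead of through the ILT.
 */
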
int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	rc = bnx2x_gunzip_init(bp);
	if (rc)
		return rc;

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#ifdef BCM_CNIC
	bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#endif

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}

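/*
 * Note (illustrative): the switch above falls through on purpose, so the
 * work done is cumulative per load_code:
 *	DRV_LOAD_COMMON   -> common + port + function init
 *	DRV_LOAD_PORT     ->          port + function init
 *	DRV_LOAD_FUNCTION ->                 function init
 */
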
void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			dma_free_coherent(&bp->pdev->dev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {

		/* status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
	BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
		       sizeof(struct host_status_block));
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}

int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 (for 1024 connections) */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);

	BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
			sizeof(struct host_status_block));
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}

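/*
 * Sketch (hypothetical helper, not part of the driver): BNX2X_PCI_ALLOC
 * above is the macro form of "coherent DMA alloc + zero, unwinding through
 * bnx2x_free_mem() on any failure". An open-coded equivalent would be:
 */
static __maybe_unused void *bnx2x_pci_zalloc_example(struct bnx2x *bp,
						     size_t size,
						     dma_addr_t *mapping)
{
	void *va = dma_alloc_coherent(&bp->pdev->dev, size, mapping,
				      GFP_KERNEL);

	if (va)
		memset(va, 0, size);	/* mirror the macro's memset() */
	return va;
}
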
/*
 * Init service functions
 */

/**
 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 * @param with_bcast set broadcast MAC as well
 */
static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
				      u32 cl_bit_vec, u8 cam_offset,
				      u8 with_bcast)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length = 1 + (with_bcast ? 1 : 0);
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.clients_bit_vector =
						cpu_to_le32(cl_bit_vec);
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	if (with_bcast) {
		config->config_table[1].cam_entry.msb_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.middle_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.lsb_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.flags = cpu_to_le16(port);
		if (set)
			config->config_table[1].target_table_entry.flags =
					TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
		else
			CAM_INVALIDATE(config->config_table[1]);
		config->config_table[1].target_table_entry.clients_bit_vector =
						cpu_to_le32(cl_bit_vec);
		config->config_table[1].target_table_entry.vlan_id = 0;
	}

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

/**
 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 */
static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
				       u32 cl_bit_vec, u8 cam_offset)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	config->hdr.length = 1;
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(cl_bit_vec);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

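/*
 * Worked example (illustrative): on a little-endian host, a MAC of
 * 00:11:22:33:44:55 yields
 *	swab16(*(u16 *)&mac[0]) = swab16(0x1100) = 0x0011 (msb)
 *	swab16(*(u16 *)&mac[2]) = swab16(0x3322) = 0x2233 (middle)
 *	swab16(*(u16 *)&mac[4]) = swab16(0x5544) = 0x4455 (lsb)
 * so each CAM word carries two address bytes in wire order.
 */
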
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

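/*
 * Usage sketch (illustrative; 'hi' and 'lo' are placeholder arguments):
 * callers bump a pending counter, post a ramrod and then spin on the
 * variable that the completion path updates:
 *
 *	bp->set_mac_pending++;
 *	smp_wmb();
 *	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, hi, lo, 0);
 *	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, 1);
 *
 * which is exactly the pattern of the bnx2x_set_*mac_addr*() helpers below.
 */
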
void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
{
	bp->set_mac_pending++;
	smp_wmb();

	bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
				   (1 << bp->fp->cl_id), BP_FUNC(bp));

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
}

void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
{
	bp->set_mac_pending++;
	smp_wmb();

	bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
				  (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
				  1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
}

#ifdef BCM_CNIC
/**
 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion
 * returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 on success, -ENODEV if the ramrod doesn't return.
 */
int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
	u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);

	bp->set_mac_pending++;
	smp_wmb();

	/* Send a SET_MAC ramrod */
	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
					  cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
					  1);
	else
		/* CAM allocation for E1H
		 * unicasts: by func number
		 * multicast: 20+FUNC*20, 20 each
		 */
		bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
					   cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));

	/* Wait for a completion when setting */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);

	return 0;
}
#endif

int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}


void bnx2x_set_num_queues_msix(struct bnx2x *bp)
{

	switch (bp->multi_mode) {
	case ETH_RSS_MODE_DISABLED:
		bp->num_queues = 1;
		break;

	case ETH_RSS_MODE_REGULAR:
		if (num_queues)
			bp->num_queues = min_t(u32, num_queues,
						  BNX2X_MAX_QUEUES(bp));
		else
			bp->num_queues = min_t(u32, num_online_cpus(),
						  BNX2X_MAX_QUEUES(bp));
		break;

	default:
		bp->num_queues = 1;
		break;
	}
}

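/*
 * Example (illustrative, assuming BNX2X_MAX_QUEUES(bp) >= 8): with
 * multi_mode=1 (ETH_RSS_MODE_REGULAR), no num_queues module parameter and
 * 8 online CPUs, the code above picks bp->num_queues = 8; loading with
 * num_queues=4 caps it at 4. The other modes always use a single queue.
 */
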

static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}

static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}

static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
	/*
	 * Wait for at least 10ms and up to 2 seconds for the timers scan to
	 * complete
	 */
	for (i = 0; i < 200; i++) {
		msleep(10);
		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
			break;
	}
#endif
	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty  %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}

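/*
 * Scope note (illustrative): the unload codes above nest the same way the
 * load path does: UNLOAD_COMMON resets port + function + common,
 * UNLOAD_PORT resets port + function, and UNLOAD_FUNCTION resets only the
 * function.
 */
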
9f6c9258 6563void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
a2fbb9ea 6564{
da5a662a 6565 int port = BP_PORT(bp);
a2fbb9ea 6566 u32 reset_code = 0;
da5a662a 6567 int i, cnt, rc;
a2fbb9ea 6568
555f6c78 6569 /* Wait until tx fastpath tasks complete */
54b9ddaa 6570 for_each_queue(bp, i) {
228241eb
ET
6571 struct bnx2x_fastpath *fp = &bp->fp[i];
6572
34f80b04 6573 cnt = 1000;
e8b5fc51 6574 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 6575
7961f791 6576 bnx2x_tx_int(fp);
34f80b04
EG
6577 if (!cnt) {
6578 BNX2X_ERR("timeout waiting for queue[%d]\n",
6579 i);
6580#ifdef BNX2X_STOP_ON_ERROR
6581 bnx2x_panic();
6582 return -EBUSY;
6583#else
6584 break;
6585#endif
6586 }
6587 cnt--;
da5a662a 6588 msleep(1);
34f80b04 6589 }
228241eb 6590 }
da5a662a
VZ
6591 /* Give HW time to discard old tx messages */
6592 msleep(1);
a2fbb9ea 6593
3101c2bc
YG
6594 if (CHIP_IS_E1(bp)) {
6595 struct mac_configuration_cmd *config =
6596 bnx2x_sp(bp, mcast_config);
6597
e665bfda 6598 bnx2x_set_eth_mac_addr_e1(bp, 0);
3101c2bc 6599
8d9c5f34 6600 for (i = 0; i < config->hdr.length; i++)
3101c2bc
YG
6601 CAM_INVALIDATE(config->config_table[i]);
6602
8d9c5f34 6603 config->hdr.length = i;
3101c2bc
YG
6604 if (CHIP_REV_IS_SLOW(bp))
6605 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6606 else
6607 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
0626b899 6608 config->hdr.client_id = bp->fp->cl_id;
3101c2bc
YG
6609 config->hdr.reserved1 = 0;
6610
e665bfda
MC
6611 bp->set_mac_pending++;
6612 smp_wmb();
6613
3101c2bc
YG
6614 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6615 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6616 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6617
6618 } else { /* E1H */
65abd74d
YG
6619 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6620
e665bfda 6621 bnx2x_set_eth_mac_addr_e1h(bp, 0);
3101c2bc
YG
6622
6623 for (i = 0; i < MC_HASH_SIZE; i++)
6624 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7d0446c2
EG
6625
6626 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
3101c2bc 6627 }
993ac7b5
MC
6628#ifdef BCM_CNIC
6629 /* Clear iSCSI L2 MAC */
6630 mutex_lock(&bp->cnic_mutex);
6631 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
6632 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
6633 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
6634 }
6635 mutex_unlock(&bp->cnic_mutex);
6636#endif
3101c2bc 6637
65abd74d
YG
6638 if (unload_mode == UNLOAD_NORMAL)
6639 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6640
7d0446c2 6641 else if (bp->flags & NO_WOL_FLAG)
65abd74d 6642 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
65abd74d 6643
7d0446c2 6644 else if (bp->wol) {
6645 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6646 u8 *mac_addr = bp->dev->dev_addr;
6647 u32 val;
6648 /* The MAC address is written to entries 1-4, to
6649 preserve entry 0, which is used by the PMF */
6650 u8 entry = (BP_E1HVN(bp) + 1)*8;
6651
6652 val = (mac_addr[0] << 8) | mac_addr[1];
6653 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6654
6655 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6656 (mac_addr[4] << 8) | mac_addr[5];
6657 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6658
6659 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6660
6661 } else
6662 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 6663
6664 /* Close multi and leading connections
6665 Completions for ramrods are collected in a synchronous way */
6666 for_each_nondefault_queue(bp, i)
6667 if (bnx2x_stop_multi(bp, i))
228241eb 6668 goto unload_error;
a2fbb9ea 6669
6670 rc = bnx2x_stop_leading(bp);
6671 if (rc) {
34f80b04 6672 BNX2X_ERR("Stop leading failed!\n");
da5a662a 6673#ifdef BNX2X_STOP_ON_ERROR
34f80b04 6674 return; /* void function - no status to report */
6675#else
6676 goto unload_error;
34f80b04 6677#endif
6678 }
6679
6680unload_error:
34f80b04 6681 if (!BP_NOMCP(bp))
228241eb 6682 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04 6683 else {
f5372251 6684 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
6685 load_count[0], load_count[1], load_count[2]);
6686 load_count[0]--;
da5a662a 6687 load_count[1 + port]--;
f5372251 6688 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
6689 load_count[0], load_count[1], load_count[2]);
6690 if (load_count[0] == 0)
6691 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 6692 else if (load_count[1 + port] == 0)
6693 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6694 else
6695 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6696 }
a2fbb9ea 6697
6698 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6699 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6700 bnx2x__link_reset(bp);
6701
6702 /* Reset the chip */
228241eb 6703 bnx2x_reset_chip(bp, reset_code);
6704
6705 /* Report UNLOAD_DONE to MCP */
34f80b04 6706 if (!BP_NOMCP(bp))
a2fbb9ea 6707 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
356e2385 6708
6709}
6710
9f6c9258 6711void bnx2x_disable_close_the_gate(struct bnx2x *bp)
6712{
6713 u32 val;
6714
6715 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
6716
6717 if (CHIP_IS_E1(bp)) {
6718 int port = BP_PORT(bp);
6719 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6720 MISC_REG_AEU_MASK_ATTN_FUNC_0;
6721
6722 val = REG_RD(bp, addr);
6723 val &= ~(0x300);
6724 REG_WR(bp, addr, val);
6725 } else if (CHIP_IS_E1H(bp)) {
6726 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
6727 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
6728 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
6729 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
6730 }
6731}
6732
6733
6734/* Close gates #2, #3 and #4: */
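/* Gate #4 discards doorbells and gate #2 discards internal host
 * writes (both on non-E1 chips only), while gate #3 is the HC config
 * enable bit of this port; "closing" them isolates the host from the
 * chip for the duration of a "process kill" reset.
 */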
6735static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
6736{
6737 u32 val, addr;
6738
6739 /* Gates #2 and #4a are closed/opened for "not E1" only */
6740 if (!CHIP_IS_E1(bp)) {
6741 /* #4 */
6742 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
6743 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
6744 close ? (val | 0x1) : (val & (~(u32)1)));
6745 /* #2 */
6746 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
6747 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
6748 close ? (val | 0x1) : (val & (~(u32)1)));
6749 }
6750
6751 /* #3 */
6752 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
6753 val = REG_RD(bp, addr);
6754 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
6755
6756 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
6757 close ? "closing" : "opening");
6758 mmiowb();
6759}
6760
6761#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
6762
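/* Set the `magic' bit in shared_mf_config.clp_mb so that the MF
 * configuration is preserved across the MCP reset; the previous bit
 * value is handed back through *magic_val for bnx2x_clp_reset_done().
 */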
6763static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
6764{
6765 /* Do some magic... */
6766 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
6767 *magic_val = val & SHARED_MF_CLP_MAGIC;
6768 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
6769}
6770
6771/* Restore the value of the `magic' bit.
6772 *
6773 * @param bp Driver handle.
6774 * @param magic_val Old value of the `magic' bit.
6775 */
6776static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
6777{
6778 /* Restore the `magic' bit value... */
6779 /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
6780 SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
6781 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
6782 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
6783 MF_CFG_WR(bp, shared_mf_config.clp_mb,
6784 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
6785}
6786
6787/* Prepares for MCP reset: takes care of CLP configurations.
6788 *
6789 * @param bp
6790 * @param magic_val Old value of 'magic' bit.
6791 */
6792static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
6793{
6794 u32 shmem;
6795 u32 validity_offset;
6796
6797 DP(NETIF_MSG_HW, "Starting\n");
6798
6799 /* Set `magic' bit in order to save MF config */
6800 if (!CHIP_IS_E1(bp))
6801 bnx2x_clp_reset_prep(bp, magic_val);
6802
6803 /* Get shmem offset */
6804 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6805 validity_offset = offsetof(struct shmem_region, validity_map[0]);
6806
6807 /* Clear validity map flags */
6808 if (shmem > 0)
6809 REG_WR(bp, shmem + validity_offset, 0);
6810}
6811
6812#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
6813#define MCP_ONE_TIMEOUT 100 /* 100 ms */
6814
6815/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
6816 * depending on the HW type.
6817 *
6818 * @param bp
6819 */
6820static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
6821{
6822 /* special handling for emulation and FPGA,
6823 wait 10 times longer */
6824 if (CHIP_REV_IS_SLOW(bp))
6825 msleep(MCP_ONE_TIMEOUT*10);
6826 else
6827 msleep(MCP_ONE_TIMEOUT);
6828}
6829
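/* Poll the shmem validity map until the MCP marks both the dev-info
 * and mailbox sections valid again, i.e. until it has finished booting
 * after the reset; give up after roughly MCP_TIMEOUT ms.
 */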
6830static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
6831{
6832 u32 shmem, cnt, validity_offset, val;
6833 int rc = 0;
6834
6835 msleep(100);
6836
6837 /* Get shmem offset */
6838 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6839 if (shmem == 0) {
6840 BNX2X_ERR("Shmem 0 return failure\n");
6841 rc = -ENOTTY;
6842 goto exit_lbl;
6843 }
6844
6845 validity_offset = offsetof(struct shmem_region, validity_map[0]);
6846
6847 /* Wait for MCP to come up */
6848 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
6849 /* TBD: it's best to check the validity map of the last port;
6850 * currently checks on port 0.
6851 */
6852 val = REG_RD(bp, shmem + validity_offset);
6853 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
6854 shmem + validity_offset, val);
6855
6856 /* check that shared memory is valid. */
6857 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6858 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6859 break;
6860
6861 bnx2x_mcp_wait_one(bp);
6862 }
6863
6864 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
6865
6866 /* Check that shared memory is valid. This indicates that MCP is up. */
6867 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
6868 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
6869 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
6870 rc = -ENOTTY;
6871 goto exit_lbl;
6872 }
6873
6874exit_lbl:
6875 /* Restore the `magic' bit value */
6876 if (!CHIP_IS_E1(bp))
6877 bnx2x_clp_reset_done(bp, magic_val);
6878
6879 return rc;
6880}
6881
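/* Judging by the register names, this clears the PXP2 read-start and
 * RQ done indications so the block is re-initialized cleanly after the
 * coming reset (nothing to do on E1).
 */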
6882static void bnx2x_pxp_prep(struct bnx2x *bp)
6883{
6884 if (!CHIP_IS_E1(bp)) {
6885 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
6886 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
6887 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
6888 mmiowb();
6889 }
6890}
6891
6892/*
6893 * Reset the whole chip except for:
6894 * - PCIE core
6895 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
6896 * one reset bit)
6897 * - IGU
6898 * - MISC (including AEU)
6899 * - GRC
6900 * - RBCN, RBCP
6901 */
6902static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
6903{
6904 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
6905
6906 not_reset_mask1 =
6907 MISC_REGISTERS_RESET_REG_1_RST_HC |
6908 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
6909 MISC_REGISTERS_RESET_REG_1_RST_PXP;
6910
6911 not_reset_mask2 =
6912 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
6913 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
6914 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
6915 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
6916 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
6917 MISC_REGISTERS_RESET_REG_2_RST_GRC |
6918 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
6919 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
6920
6921 reset_mask1 = 0xffffffff;
6922
6923 if (CHIP_IS_E1(bp))
6924 reset_mask2 = 0xffff;
6925 else
6926 reset_mask2 = 0x1ffff;
6927
6928 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6929 reset_mask1 & (~not_reset_mask1));
6930 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6931 reset_mask2 & (~not_reset_mask2));
6932
6933 barrier();
6934 mmiowb();
6935
6936 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
6937 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
6938 mmiowb();
6939}
6940
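/* Last-resort "process kill" recovery: wait for the PXP Tetris buffer
 * and read queues to drain, close gates #2-#4, prepare the MCP and PXP
 * blocks, reset almost the whole chip (see the masks above), wait for
 * the MCP to come back up and then re-open the gates.
 */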
6941static int bnx2x_process_kill(struct bnx2x *bp)
6942{
6943 int cnt = 1000;
6944 u32 val = 0;
6945 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
6946
6947
6948 /* Empty the Tetris buffer, wait for 1s */
6949 do {
6950 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
6951 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
6952 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
6953 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
6954 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
6955 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
6956 ((port_is_idle_0 & 0x1) == 0x1) &&
6957 ((port_is_idle_1 & 0x1) == 0x1) &&
6958 (pgl_exp_rom2 == 0xffffffff))
6959 break;
6960 msleep(1);
6961 } while (cnt-- > 0);
6962
6963 if (cnt <= 0) {
6964 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
6965 " are still"
6966 " outstanding read requests after 1s!\n");
6967 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
6968 " port_is_idle_0=0x%08x,"
6969 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
6970 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
6971 pgl_exp_rom2);
6972 return -EAGAIN;
6973 }
6974
6975 barrier();
6976
6977 /* Close gates #2, #3 and #4 */
6978 bnx2x_set_234_gates(bp, true);
6979
6980 /* TBD: Indicate that "process kill" is in progress to MCP */
6981
6982 /* Clear "unprepared" bit */
6983 REG_WR(bp, MISC_REG_UNPREPARED, 0);
6984 barrier();
6985
6986 /* Make sure all is written to the chip before the reset */
6987 mmiowb();
6988
6989 /* Wait for 1ms to empty GLUE and PCI-E core queues,
6990 * PSWHST, GRC and PSWRD Tetris buffer.
6991 */
6992 msleep(1);
6993
6994 /* Prepare to chip reset: */
6995 /* MCP */
6996 bnx2x_reset_mcp_prep(bp, &val);
6997
6998 /* PXP */
6999 bnx2x_pxp_prep(bp);
7000 barrier();
7001
7002 /* reset the chip */
7003 bnx2x_process_kill_chip_reset(bp);
7004 barrier();
7005
7006 /* Recover after reset: */
7007 /* MCP */
7008 if (bnx2x_reset_mcp_comp(bp, val))
7009 return -EAGAIN;
7010
7011 /* PXP */
7012 bnx2x_pxp_prep(bp);
7013
7014 /* Open the gates #2, #3 and #4 */
7015 bnx2x_set_234_gates(bp, false);
7016
7017 /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
7018 * reset state, re-enable attentions. */
7019
7020 return 0;
7021}
7022
7023static int bnx2x_leader_reset(struct bnx2x *bp)
7024{
7025 int rc = 0;
7026 /* Try to recover after the failure */
7027 if (bnx2x_process_kill(bp)) {
7028 printk(KERN_ERR "%s: Something bad has happened! Aii!\n",
7029 bp->dev->name);
7030 rc = -EAGAIN;
7031 goto exit_leader_reset;
7032 }
7033
7034 /* Clear "reset is in progress" bit and update the driver state */
7035 bnx2x_set_reset_done(bp);
7036 bp->recovery_state = BNX2X_RECOVERY_DONE;
7037
7038exit_leader_reset:
7039 bp->is_leader = 0;
7040 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
7041 smp_wmb();
7042 return rc;
7043}
7044
7045/* Assumption: runs under rtnl lock. This together with the fact
7046 * that it's called only from bnx2x_reset_task() ensures that it
7047 * will never be called when netif_running(bp->dev) is false.
7048 */
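/* Recovery state machine: in BNX2X_RECOVERY_INIT the function unloads
 * itself and tries to become the recovery leader by grabbing the
 * RESERVED_08 HW lock; in BNX2X_RECOVERY_WAIT the leader waits for the
 * global load count to reach zero and then runs the "process kill",
 * while non-leaders either inherit leadership if the lock frees up or
 * wait for the leader to finish and then reload normally.
 */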
7049static void bnx2x_parity_recover(struct bnx2x *bp)
7050{
7051 DP(NETIF_MSG_HW, "Handling parity\n");
7052 while (1) {
7053 switch (bp->recovery_state) {
7054 case BNX2X_RECOVERY_INIT:
7055 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
7056 /* Try to get a LEADER_LOCK HW lock */
7057 if (bnx2x_trylock_hw_lock(bp,
7058 HW_LOCK_RESOURCE_RESERVED_08))
7059 bp->is_leader = 1;
7060
7061 /* Stop the driver */
7062 /* If interface has been removed - break */
7063 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
7064 return;
7065
7066 bp->recovery_state = BNX2X_RECOVERY_WAIT;
7067 /* Ensure "is_leader" and "recovery_state"
7068 * update values are seen on other CPUs
7069 */
7070 smp_wmb();
7071 break;
7072
7073 case BNX2X_RECOVERY_WAIT:
7074 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
7075 if (bp->is_leader) {
7076 u32 load_counter = bnx2x_get_load_cnt(bp);
7077 if (load_counter) {
7078 /* Wait until all other functions get
7079 * down.
7080 */
7081 schedule_delayed_work(&bp->reset_task,
7082 HZ/10);
7083 return;
7084 } else {
7085 /* If all other functions got down -
7086 * try to bring the chip back to
7087 * normal. In any case it's an exit
7088 * point for a leader.
7089 */
7090 if (bnx2x_leader_reset(bp) ||
7091 bnx2x_nic_load(bp, LOAD_NORMAL)) {
7092 printk(KERN_ERR "%s: Recovery "
7093 "has failed. Power cycle is "
7094 "needed.\n", bp->dev->name);
7095 /* Disconnect this device */
7096 netif_device_detach(bp->dev);
7097 /* Block ifup for all function
7098 * of this ASIC until
7099 * "process kill" or power
7100 * cycle.
7101 */
7102 bnx2x_set_reset_in_progress(bp);
7103 /* Shut down the power */
7104 bnx2x_set_power_state(bp,
7105 PCI_D3hot);
7106 return;
7107 }
7108
7109 return;
7110 }
7111 } else { /* non-leader */
7112 if (!bnx2x_reset_is_done(bp)) {
7113 /* Try to get a LEADER_LOCK HW lock as
7114 * long as a former leader may have
7115 * been unloaded by the user or
7116 * released a leadership by another
7117 * reason.
7118 */
7119 if (bnx2x_trylock_hw_lock(bp,
7120 HW_LOCK_RESOURCE_RESERVED_08)) {
7121 /* I'm a leader now! Restart a
7122 * switch case.
7123 */
7124 bp->is_leader = 1;
7125 break;
7126 }
7127
7128 schedule_delayed_work(&bp->reset_task,
7129 HZ/10);
7130 return;
7131
7132 } else { /* A leader has completed
7133 * the "process kill". It's an exit
7134 * point for a non-leader.
7135 */
7136 bnx2x_nic_load(bp, LOAD_NORMAL);
7137 bp->recovery_state =
7138 BNX2X_RECOVERY_DONE;
7139 smp_wmb();
7140 return;
7141 }
7142 }
7143 default:
7144 return;
7145 }
7146 }
7147}
7148
7149/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
7150 * scheduled on a general queue in order to prevent a dead lock.
7151 */
7152static void bnx2x_reset_task(struct work_struct *work)
7153{
72fd0718 7154 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
7155
7156#ifdef BNX2X_STOP_ON_ERROR
7157 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7158 " so reset not done to allow debug dump,\n"
72fd0718 7159 KERN_ERR " you will need to reboot when done\n");
7160 return;
7161#endif
7162
7163 rtnl_lock();
7164
7165 if (!netif_running(bp->dev))
7166 goto reset_task_exit;
7167
7168 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
7169 bnx2x_parity_recover(bp);
7170 else {
7171 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7172 bnx2x_nic_load(bp, LOAD_NORMAL);
7173 }
7174
7175reset_task_exit:
7176 rtnl_unlock();
7177}
7178
7179/* end of nic load/unload */
7180
7181/*
7182 * Init service functions
7183 */
7184
7185static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7186{
7187 switch (func) {
7188 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7189 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7190 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7191 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7192 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7193 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7194 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7195 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7196 default:
7197 BNX2X_ERR("Unsupported function index: %d\n", func);
7198 return (u32)(-1);
7199 }
7200}
7201
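/* Temporarily program this function's PGL pretend register to 0 so
 * that the following interrupt-disable GRC writes are issued as if
 * they came from function 0, then restore the original setting.
 */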
7202static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7203{
7204 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7205
7206 /* Flush all outstanding writes */
7207 mmiowb();
7208
7209 /* Pretend to be function 0 */
7210 REG_WR(bp, reg, 0);
7211 /* Flush the GRC transaction (in the chip) */
7212 new_val = REG_RD(bp, reg);
7213 if (new_val != 0) {
7214 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7215 new_val);
7216 BUG();
7217 }
7218
7219 /* From now we are in the "like-E1" mode */
7220 bnx2x_int_disable(bp);
7221
7222 /* Flush all outstanding writes */
7223 mmiowb();
7224
7225 /* Restore the original function settings */
7226 REG_WR(bp, reg, orig_func);
7227 new_val = REG_RD(bp, reg);
7228 if (new_val != orig_func) {
7229 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7230 orig_func, new_val);
7231 BUG();
7232 }
7233}
7234
7235static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7236{
7237 if (CHIP_IS_E1H(bp))
7238 bnx2x_undi_int_disable_e1h(bp, func);
7239 else
7240 bnx2x_int_disable(bp);
7241}
7242
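/* If a pre-boot UNDI driver left the chip initialized (MISC_REG_UNPREPARED
 * set and the UNDI doorbell CID offset of 0x7 programmed), unload it
 * gracefully: report the unload to the MCP for one or both ports,
 * quiesce the Rx path, reset the chip and restore the NIG port-swap
 * strapping before the normal probe continues.
 */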
7243static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7244{
7245 u32 val;
7246
7247 /* Check if there is any driver already loaded */
7248 val = REG_RD(bp, MISC_REG_UNPREPARED);
7249 if (val == 0x1) {
7250 /* Check if it is the UNDI driver
7251 * UNDI driver initializes CID offset for normal bell to 0x7
7252 */
4a37fb66 7253 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7254 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7255 if (val == 0x7) {
7256 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7257 /* save our func */
34f80b04 7258 int func = BP_FUNC(bp);
7259 u32 swap_en;
7260 u32 swap_val;
34f80b04 7261
7262 /* clear the UNDI indication */
7263 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7264
7265 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7266
7267 /* try unload UNDI on port 0 */
7268 bp->func = 0;
7269 bp->fw_seq =
7270 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7271 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 7272 reset_code = bnx2x_fw_command(bp, reset_code);
7273
7274 /* if UNDI is loaded on the other port */
7275 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7276
7277 /* send "DONE" for previous unload */
7278 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7279
7280 /* unload UNDI on port 1 */
34f80b04 7281 bp->func = 1;
7282 bp->fw_seq =
7283 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7284 DRV_MSG_SEQ_NUMBER_MASK);
7285 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7286
7287 bnx2x_fw_command(bp, reset_code);
7288 }
7289
7290 /* now it's safe to release the lock */
7291 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7292
f1ef27ef 7293 bnx2x_undi_int_disable(bp, func);
7294
7295 /* close input traffic and wait for it */
7296 /* Do not rcv packets to BRB */
7297 REG_WR(bp,
7298 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7299 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7300 /* Do not direct rcv packets that are not for MCP to
7301 * the BRB */
7302 REG_WR(bp,
7303 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7304 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7305 /* clear AEU */
7306 REG_WR(bp,
7307 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7308 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7309 msleep(10);
7310
7311 /* save NIG port swap info */
7312 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7313 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7314 /* reset device */
7315 REG_WR(bp,
7316 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 7317 0xd3ffffff);
7318 REG_WR(bp,
7319 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7320 0x1403);
7321 /* take the NIG out of reset and restore swap values */
7322 REG_WR(bp,
7323 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7324 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7325 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7326 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7327
7328 /* send unload done to the MCP */
7329 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7330
7331 /* restore our func and fw_seq */
7332 bp->func = func;
7333 bp->fw_seq =
7334 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7335 DRV_MSG_SEQ_NUMBER_MASK);
7336
7337 } else
7338 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7339 }
7340}
7341
7342static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7343{
7344 u32 val, val2, val3, val4, id;
72ce58c3 7345 u16 pmc;
7346
7347 /* Get the chip revision id and number. */
7348 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7349 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7350 id = ((val & 0xffff) << 16);
7351 val = REG_RD(bp, MISC_REG_CHIP_REV);
7352 id |= ((val & 0xf) << 12);
7353 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7354 id |= ((val & 0xff) << 4);
5a40e08e 7355 val = REG_RD(bp, MISC_REG_BOND_ID);
7356 id |= (val & 0xf);
7357 bp->common.chip_id = id;
7358 bp->link_params.chip_id = bp->common.chip_id;
7359 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7360
7361 val = (REG_RD(bp, 0x2874) & 0x55);
7362 if ((bp->common.chip_id & 0x1) ||
7363 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7364 bp->flags |= ONE_PORT_FLAG;
7365 BNX2X_DEV_INFO("single port device\n");
7366 }
7367
7368 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7369 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7370 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7371 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7372 bp->common.flash_size, bp->common.flash_size);
7373
7374 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
2691d51d 7375 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
34f80b04 7376 bp->link_params.shmem_base = bp->common.shmem_base;
7377 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
7378 bp->common.shmem_base, bp->common.shmem2_base);
7379
7380 if (!bp->common.shmem_base ||
7381 (bp->common.shmem_base < 0xA0000) ||
7382 (bp->common.shmem_base >= 0xC0000)) {
7383 BNX2X_DEV_INFO("MCP not active\n");
7384 bp->flags |= NO_MCP_FLAG;
7385 return;
7386 }
7387
7388 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7389 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7390 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
cdaa7cb8 7391 BNX2X_ERROR("BAD MCP validity signature\n");
7392
7393 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 7394 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
7395
7396 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7397 SHARED_HW_CFG_LED_MODE_MASK) >>
7398 SHARED_HW_CFG_LED_MODE_SHIFT);
7399
7400 bp->link_params.feature_config_flags = 0;
7401 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7402 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7403 bp->link_params.feature_config_flags |=
7404 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7405 else
7406 bp->link_params.feature_config_flags &=
7407 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7408
7409 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7410 bp->common.bc_ver = val;
7411 BNX2X_DEV_INFO("bc_ver %X\n", val);
7412 if (val < BNX2X_BC_VER) {
7413 /* for now only warn;
7414 * later we might need to enforce this */
7415 BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
7416 "please upgrade BC\n", BNX2X_BC_VER, val);
34f80b04 7417 }
7418 bp->link_params.feature_config_flags |=
7419 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
7420 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
7421
7422 if (BP_E1HVN(bp) == 0) {
7423 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7424 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7425 } else {
7426 /* no WOL capability for E1HVN != 0 */
7427 bp->flags |= NO_WOL_FLAG;
7428 }
7429 BNX2X_DEV_INFO("%sWoL capable\n",
f5372251 7430 (bp->flags & NO_WOL_FLAG) ? "not " : "");
7431
7432 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7433 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7434 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7435 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7436
7437 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
7438 val, val2, val3, val4);
7439}
7440
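/* Build the ethtool 'supported' mask of the port from the external
 * PHY type reported in NVRAM, then prune it according to the
 * speed_cap_mask, also recording the PHY address for later MDIO use.
 */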
7441static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7442 u32 switch_cfg)
a2fbb9ea 7443{
34f80b04 7444 int port = BP_PORT(bp);
7445 u32 ext_phy_type;
7446
7447 switch (switch_cfg) {
7448 case SWITCH_CFG_1G:
7449 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7450
7451 ext_phy_type =
7452 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7453 switch (ext_phy_type) {
7454 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7455 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7456 ext_phy_type);
7457
7458 bp->port.supported |= (SUPPORTED_10baseT_Half |
7459 SUPPORTED_10baseT_Full |
7460 SUPPORTED_100baseT_Half |
7461 SUPPORTED_100baseT_Full |
7462 SUPPORTED_1000baseT_Full |
7463 SUPPORTED_2500baseX_Full |
7464 SUPPORTED_TP |
7465 SUPPORTED_FIBRE |
7466 SUPPORTED_Autoneg |
7467 SUPPORTED_Pause |
7468 SUPPORTED_Asym_Pause);
7469 break;
7470
7471 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7472 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7473 ext_phy_type);
7474
7475 bp->port.supported |= (SUPPORTED_10baseT_Half |
7476 SUPPORTED_10baseT_Full |
7477 SUPPORTED_100baseT_Half |
7478 SUPPORTED_100baseT_Full |
7479 SUPPORTED_1000baseT_Full |
7480 SUPPORTED_TP |
7481 SUPPORTED_FIBRE |
7482 SUPPORTED_Autoneg |
7483 SUPPORTED_Pause |
7484 SUPPORTED_Asym_Pause);
7485 break;
7486
7487 default:
7488 BNX2X_ERR("NVRAM config error. "
7489 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 7490 bp->link_params.ext_phy_config);
7491 return;
7492 }
7493
7494 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7495 port*0x10);
7496 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7497 break;
7498
7499 case SWITCH_CFG_10G:
7500 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7501
7502 ext_phy_type =
7503 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7504 switch (ext_phy_type) {
7505 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7506 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7507 ext_phy_type);
7508
7509 bp->port.supported |= (SUPPORTED_10baseT_Half |
7510 SUPPORTED_10baseT_Full |
7511 SUPPORTED_100baseT_Half |
7512 SUPPORTED_100baseT_Full |
7513 SUPPORTED_1000baseT_Full |
7514 SUPPORTED_2500baseX_Full |
7515 SUPPORTED_10000baseT_Full |
7516 SUPPORTED_TP |
7517 SUPPORTED_FIBRE |
7518 SUPPORTED_Autoneg |
7519 SUPPORTED_Pause |
7520 SUPPORTED_Asym_Pause);
7521 break;
7522
7523 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7524 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
34f80b04 7525 ext_phy_type);
f1410647 7526
34f80b04 7527 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 7528 SUPPORTED_1000baseT_Full |
34f80b04 7529 SUPPORTED_FIBRE |
589abe3a 7530 SUPPORTED_Autoneg |
7531 SUPPORTED_Pause |
7532 SUPPORTED_Asym_Pause);
7533 break;
7534
7535 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7536 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7537 ext_phy_type);
7538
34f80b04 7539 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 7540 SUPPORTED_2500baseX_Full |
34f80b04 7541 SUPPORTED_1000baseT_Full |
7542 SUPPORTED_FIBRE |
7543 SUPPORTED_Autoneg |
7544 SUPPORTED_Pause |
7545 SUPPORTED_Asym_Pause);
7546 break;
7547
7548 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7549 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7550 ext_phy_type);
7551
7552 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7553 SUPPORTED_FIBRE |
7554 SUPPORTED_Pause |
7555 SUPPORTED_Asym_Pause);
7556 break;
7557
7558 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7559 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7560 ext_phy_type);
7561
7562 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7563 SUPPORTED_1000baseT_Full |
7564 SUPPORTED_FIBRE |
7565 SUPPORTED_Pause |
7566 SUPPORTED_Asym_Pause);
7567 break;
7568
7569 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
7570 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
7571 ext_phy_type);
7572
34f80b04 7573 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04 7574 SUPPORTED_1000baseT_Full |
34f80b04 7575 SUPPORTED_Autoneg |
589abe3a 7576 SUPPORTED_FIBRE |
7577 SUPPORTED_Pause |
7578 SUPPORTED_Asym_Pause);
7579 break;
7580
7581 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
7582 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
7583 ext_phy_type);
7584
7585 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7586 SUPPORTED_1000baseT_Full |
7587 SUPPORTED_Autoneg |
7588 SUPPORTED_FIBRE |
7589 SUPPORTED_Pause |
7590 SUPPORTED_Asym_Pause);
7591 break;
7592
7593 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7594 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7595 ext_phy_type);
7596
7597 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7598 SUPPORTED_TP |
7599 SUPPORTED_Autoneg |
7600 SUPPORTED_Pause |
7601 SUPPORTED_Asym_Pause);
7602 break;
7603
7604 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
7605 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
7606 ext_phy_type);
7607
7608 bp->port.supported |= (SUPPORTED_10baseT_Half |
7609 SUPPORTED_10baseT_Full |
7610 SUPPORTED_100baseT_Half |
7611 SUPPORTED_100baseT_Full |
7612 SUPPORTED_1000baseT_Full |
7613 SUPPORTED_10000baseT_Full |
7614 SUPPORTED_TP |
7615 SUPPORTED_Autoneg |
7616 SUPPORTED_Pause |
7617 SUPPORTED_Asym_Pause);
7618 break;
7619
7620 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7621 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7622 bp->link_params.ext_phy_config);
7623 break;
7624
7625 default:
7626 BNX2X_ERR("NVRAM config error. "
7627 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 7628 bp->link_params.ext_phy_config);
7629 return;
7630 }
7631
7632 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7633 port*0x18);
7634 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 7635
7636 break;
7637
7638 default:
7639 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 7640 bp->port.link_config);
7641 return;
7642 }
34f80b04 7643 bp->link_params.phy_addr = bp->port.phy_addr;
7644
7645 /* mask what we support according to speed_cap_mask */
7646 if (!(bp->link_params.speed_cap_mask &
7647 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 7648 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 7649
7650 if (!(bp->link_params.speed_cap_mask &
7651 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 7652 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 7653
7654 if (!(bp->link_params.speed_cap_mask &
7655 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 7656 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 7657
7658 if (!(bp->link_params.speed_cap_mask &
7659 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 7660 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 7661
7662 if (!(bp->link_params.speed_cap_mask &
7663 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7664 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7665 SUPPORTED_1000baseT_Full);
a2fbb9ea 7666
7667 if (!(bp->link_params.speed_cap_mask &
7668 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 7669 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 7670
7671 if (!(bp->link_params.speed_cap_mask &
7672 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 7673 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 7674
34f80b04 7675 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7676}
7677
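/* Translate the NVRAM link_config into the requested speed/duplex and
 * the matching ethtool advertising mask; unknown speed values fall
 * back to autonegotiation, while unsupported ones abort with an error.
 */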
34f80b04 7678static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 7679{
c18487ee 7680 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 7681
34f80b04 7682 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 7683 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 7684 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 7685 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7686 bp->port.advertising = bp->port.supported;
a2fbb9ea 7687 } else {
7688 u32 ext_phy_type =
7689 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7690
7691 if ((ext_phy_type ==
7692 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7693 (ext_phy_type ==
7694 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 7695 /* force 10G, no AN */
c18487ee 7696 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 7697 bp->port.advertising =
7698 (ADVERTISED_10000baseT_Full |
7699 ADVERTISED_FIBRE);
7700 break;
7701 }
7702 BNX2X_ERR("NVRAM config error. "
7703 "Invalid link_config 0x%x"
7704 " Autoneg not supported\n",
34f80b04 7705 bp->port.link_config);
7706 return;
7707 }
7708 break;
7709
7710 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 7711 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 7712 bp->link_params.req_line_speed = SPEED_10;
7713 bp->port.advertising = (ADVERTISED_10baseT_Full |
7714 ADVERTISED_TP);
a2fbb9ea 7715 } else {
7716 BNX2X_ERROR("NVRAM config error. "
7717 "Invalid link_config 0x%x"
7718 " speed_cap_mask 0x%x\n",
7719 bp->port.link_config,
7720 bp->link_params.speed_cap_mask);
7721 return;
7722 }
7723 break;
7724
7725 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 7726 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7727 bp->link_params.req_line_speed = SPEED_10;
7728 bp->link_params.req_duplex = DUPLEX_HALF;
7729 bp->port.advertising = (ADVERTISED_10baseT_Half |
7730 ADVERTISED_TP);
a2fbb9ea 7731 } else {
7732 BNX2X_ERROR("NVRAM config error. "
7733 "Invalid link_config 0x%x"
7734 " speed_cap_mask 0x%x\n",
7735 bp->port.link_config,
7736 bp->link_params.speed_cap_mask);
7737 return;
7738 }
7739 break;
7740
7741 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 7742 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 7743 bp->link_params.req_line_speed = SPEED_100;
7744 bp->port.advertising = (ADVERTISED_100baseT_Full |
7745 ADVERTISED_TP);
a2fbb9ea 7746 } else {
7747 BNX2X_ERROR("NVRAM config error. "
7748 "Invalid link_config 0x%x"
7749 " speed_cap_mask 0x%x\n",
7750 bp->port.link_config,
7751 bp->link_params.speed_cap_mask);
7752 return;
7753 }
7754 break;
7755
7756 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 7757 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7758 bp->link_params.req_line_speed = SPEED_100;
7759 bp->link_params.req_duplex = DUPLEX_HALF;
7760 bp->port.advertising = (ADVERTISED_100baseT_Half |
7761 ADVERTISED_TP);
a2fbb9ea 7762 } else {
7763 BNX2X_ERROR("NVRAM config error. "
7764 "Invalid link_config 0x%x"
7765 " speed_cap_mask 0x%x\n",
7766 bp->port.link_config,
7767 bp->link_params.speed_cap_mask);
7768 return;
7769 }
7770 break;
7771
7772 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 7773 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 7774 bp->link_params.req_line_speed = SPEED_1000;
7775 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7776 ADVERTISED_TP);
a2fbb9ea 7777 } else {
7778 BNX2X_ERROR("NVRAM config error. "
7779 "Invalid link_config 0x%x"
7780 " speed_cap_mask 0x%x\n",
7781 bp->port.link_config,
7782 bp->link_params.speed_cap_mask);
7783 return;
7784 }
7785 break;
7786
7787 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 7788 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 7789 bp->link_params.req_line_speed = SPEED_2500;
7790 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7791 ADVERTISED_TP);
a2fbb9ea 7792 } else {
7793 BNX2X_ERROR("NVRAM config error. "
7794 "Invalid link_config 0x%x"
7795 " speed_cap_mask 0x%x\n",
7796 bp->port.link_config,
7797 bp->link_params.speed_cap_mask);
7798 return;
7799 }
7800 break;
7801
7802 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7803 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7804 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 7805 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 7806 bp->link_params.req_line_speed = SPEED_10000;
7807 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7808 ADVERTISED_FIBRE);
a2fbb9ea 7809 } else {
7810 BNX2X_ERROR("NVRAM config error. "
7811 "Invalid link_config 0x%x"
7812 " speed_cap_mask 0x%x\n",
7813 bp->port.link_config,
7814 bp->link_params.speed_cap_mask);
7815 return;
7816 }
7817 break;
7818
7819 default:
7820 BNX2X_ERROR("NVRAM config error. "
7821 "BAD link speed link_config 0x%x\n",
7822 bp->port.link_config);
c18487ee 7823 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7824 bp->port.advertising = bp->port.supported;
7825 break;
7826 }
a2fbb9ea 7827
7828 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7829 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 7830 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 7831 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 7832 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 7833
c18487ee 7834 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 7835 " advertising 0x%x\n",
7836 bp->link_params.req_line_speed,
7837 bp->link_params.req_duplex,
34f80b04 7838 bp->link_params.req_flow_ctrl, bp->port.advertising);
7839}
7840
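/* Shmem keeps a MAC address as a 32 bit "lower" and a 16 bit "upper"
 * word in CPU order; convert both to big endian and glue them into
 * the canonical 6 byte wire format.
 */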
7841static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
7842{
7843 mac_hi = cpu_to_be16(mac_hi);
7844 mac_lo = cpu_to_be32(mac_lo);
7845 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
7846 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
7847}
7848
34f80b04 7849static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 7850{
7851 int port = BP_PORT(bp);
7852 u32 val, val2;
589abe3a 7853 u32 config;
c2c8b03e 7854 u16 i;
01cd4528 7855 u32 ext_phy_type;
a2fbb9ea 7856
c18487ee 7857 bp->link_params.bp = bp;
34f80b04 7858 bp->link_params.port = port;
c18487ee 7859
c18487ee 7860 bp->link_params.lane_config =
a2fbb9ea 7861 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 7862 bp->link_params.ext_phy_config =
7863 SHMEM_RD(bp,
7864 dev_info.port_hw_config[port].external_phy_config);
7865 /* BCM8727_NOC => BCM8727 no over current */
7866 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
7867 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
7868 bp->link_params.ext_phy_config &=
7869 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
7870 bp->link_params.ext_phy_config |=
7871 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
7872 bp->link_params.feature_config_flags |=
7873 FEATURE_CONFIG_BCM8727_NOC;
7874 }
7875
c18487ee 7876 bp->link_params.speed_cap_mask =
7877 SHMEM_RD(bp,
7878 dev_info.port_hw_config[port].speed_capability_mask);
7879
34f80b04 7880 bp->port.link_config =
7881 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7882
7883 /* Get the 4 lanes xgxs config rx and tx */
7884 for (i = 0; i < 2; i++) {
7885 val = SHMEM_RD(bp,
7886 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
7887 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
7888 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
7889
7890 val = SHMEM_RD(bp,
7891 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
7892 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
7893 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
7894 }
7895
7896 /* If the device is capable of WoL, set the default state according
7897 * to the HW
7898 */
4d295db0 7899 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
7900 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
7901 (config & PORT_FEATURE_WOL_ENABLED));
7902
7903 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
7904 " speed_cap_mask 0x%08x link_config 0x%08x\n",
7905 bp->link_params.lane_config,
7906 bp->link_params.ext_phy_config,
34f80b04 7907 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 7908
7909 bp->link_params.switch_cfg |= (bp->port.link_config &
7910 PORT_FEATURE_CONNECTED_SWITCH_MASK);
c18487ee 7911 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7912
7913 bnx2x_link_settings_requested(bp);
7914
7915 /*
7916 * If connected directly, work with the internal PHY, otherwise, work
7917 * with the external PHY
7918 */
7919 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7920 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
7921 bp->mdio.prtad = bp->link_params.phy_addr;
7922
7923 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
7924 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
7925 bp->mdio.prtad =
659bc5c4 7926 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
01cd4528 7927
7928 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7929 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
e665bfda 7930 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
7931 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7932 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7933
7934#ifdef BCM_CNIC
7935 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
7936 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
7937 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
7938#endif
7939}
7940
7941static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7942{
7943 int func = BP_FUNC(bp);
7944 u32 val, val2;
7945 int rc = 0;
a2fbb9ea 7946
34f80b04 7947 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 7948
7949 bp->e1hov = 0;
7950 bp->e1hmf = 0;
2145a920 7951 if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
7952 bp->mf_config =
7953 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 7954
2691d51d 7955 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
3196a88a 7956 FUNC_MF_CFG_E1HOV_TAG_MASK);
2691d51d 7957 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
34f80b04 7958 bp->e1hmf = 1;
7959 BNX2X_DEV_INFO("%s function mode\n",
7960 IS_E1HMF(bp) ? "multi" : "single");
7961
7962 if (IS_E1HMF(bp)) {
7963 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
7964 e1hov_tag) &
7965 FUNC_MF_CFG_E1HOV_TAG_MASK);
7966 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7967 bp->e1hov = val;
7968 BNX2X_DEV_INFO("E1HOV for func %d is %d "
7969 "(0x%04x)\n",
7970 func, bp->e1hov, bp->e1hov);
7971 } else {
7972 BNX2X_ERROR("No valid E1HOV for func %d,"
7973 " aborting\n", func);
7974 rc = -EPERM;
7975 }
7976 } else {
7977 if (BP_E1HVN(bp)) {
7978 BNX2X_ERROR("VN %d in single function mode,"
7979 " aborting\n", BP_E1HVN(bp));
7980 rc = -EPERM;
7981 }
7982 }
7983 }
a2fbb9ea 7984
7985 if (!BP_NOMCP(bp)) {
7986 bnx2x_get_port_hwinfo(bp);
7987
7988 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7989 DRV_MSG_SEQ_NUMBER_MASK);
7990 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7991 }
7992
7993 if (IS_E1HMF(bp)) {
7994 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7995 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7996 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7997 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7998 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7999 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8000 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8001 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8002 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8003 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8004 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8005 ETH_ALEN);
8006 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8007 ETH_ALEN);
a2fbb9ea 8008 }
8009
8010 return rc;
8011 }
8012
8013 if (BP_NOMCP(bp)) {
8014 /* only supposed to happen on emulation/FPGA */
cdaa7cb8 8015 BNX2X_ERROR("warning: random MAC workaround active\n");
8016 random_ether_addr(bp->dev->dev_addr);
8017 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8018 }
a2fbb9ea 8019
8020 return rc;
8021}
8022
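/* Read the PCI VPD and, on boards whose read-only area carries the
 * Dell manufacturer ID, copy the vendor-specific "V0" string into
 * bp->fw_ver; any parse failure simply leaves bp->fw_ver zeroed.
 */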
8023static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
8024{
8025 int cnt, i, block_end, rodi;
8026 char vpd_data[BNX2X_VPD_LEN+1];
8027 char str_id_reg[VENDOR_ID_LEN+1];
8028 char str_id_cap[VENDOR_ID_LEN+1];
8029 u8 len;
8030
8031 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
8032 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
8033
8034 if (cnt < BNX2X_VPD_LEN)
8035 goto out_not_found;
8036
8037 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
8038 PCI_VPD_LRDT_RO_DATA);
8039 if (i < 0)
8040 goto out_not_found;
8041
8042
8043 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
8044 pci_vpd_lrdt_size(&vpd_data[i]);
8045
8046 i += PCI_VPD_LRDT_TAG_SIZE;
8047
8048 if (block_end > BNX2X_VPD_LEN)
8049 goto out_not_found;
8050
8051 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8052 PCI_VPD_RO_KEYWORD_MFR_ID);
8053 if (rodi < 0)
8054 goto out_not_found;
8055
8056 len = pci_vpd_info_field_size(&vpd_data[rodi]);
8057
8058 if (len != VENDOR_ID_LEN)
8059 goto out_not_found;
8060
8061 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8062
8063 /* vendor specific info */
8064 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
8065 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
8066 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
8067 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
8068
8069 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8070 PCI_VPD_RO_KEYWORD_VENDOR0);
8071 if (rodi >= 0) {
8072 len = pci_vpd_info_field_size(&vpd_data[rodi]);
8073
8074 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8075
8076 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
8077 memcpy(bp->fw_ver, &vpd_data[rodi], len);
8078 bp->fw_ver[len] = ' ';
8079 }
8080 }
8081 return;
8082 }
8083out_not_found:
8084 return;
8085}
8086
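/* One-time per-device init: read the HW and VPD info, undo a possible
 * UNDI load, validate the multi-queue/interrupt-mode module options
 * and set up the default ring sizes, coalescing values and the
 * periodic timer.
 */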
8087static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8088{
8089 int func = BP_FUNC(bp);
87942b46 8090 int timer_interval;
8091 int rc;
8092
8093 /* Disable interrupt handling until HW is initialized */
8094 atomic_set(&bp->intr_sem, 1);
e1510706 8095 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
da5a662a 8096
34f80b04 8097 mutex_init(&bp->port.phy_mutex);
c4ff7cbf 8098 mutex_init(&bp->fw_mb_mutex);
8099#ifdef BCM_CNIC
8100 mutex_init(&bp->cnic_mutex);
8101#endif
a2fbb9ea 8102
1cf167f2 8103 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
72fd0718 8104 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
8105
8106 rc = bnx2x_get_hwinfo(bp);
8107
34f24c7f 8108 bnx2x_read_fwinfo(bp);
8109 /* need to reset chip if undi was active */
8110 if (!BP_NOMCP(bp))
8111 bnx2x_undi_unload(bp);
8112
8113 if (CHIP_REV_IS_FPGA(bp))
cdaa7cb8 8114 dev_err(&bp->pdev->dev, "FPGA detected\n");
8115
8116 if (BP_NOMCP(bp) && (func == 0))
8117 dev_err(&bp->pdev->dev, "MCP disabled, "
8118 "must load devices in order!\n");
34f80b04 8119
555f6c78 8120 /* Set multi queue mode */
8121 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8122 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8123 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
8124 "requested is not MSI-X\n");
8125 multi_mode = ETH_RSS_MODE_DISABLED;
8126 }
8127 bp->multi_mode = multi_mode;
5d7cd496 8128 bp->int_mode = int_mode;
555f6c78 8129
8130 bp->dev->features |= NETIF_F_GRO;
8131
8132 /* Set TPA flags */
8133 if (disable_tpa) {
8134 bp->flags &= ~TPA_ENABLE_FLAG;
8135 bp->dev->features &= ~NETIF_F_LRO;
8136 } else {
8137 bp->flags |= TPA_ENABLE_FLAG;
8138 bp->dev->features |= NETIF_F_LRO;
8139 }
5d7cd496 8140 bp->disable_tpa = disable_tpa;
7a9b2557 8141
8142 if (CHIP_IS_E1(bp))
8143 bp->dropless_fc = 0;
8144 else
8145 bp->dropless_fc = dropless_fc;
8146
8d5726c4 8147 bp->mrrs = mrrs;
7a9b2557 8148
8149 bp->tx_ring_size = MAX_TX_AVAIL;
8150 bp->rx_ring_size = MAX_RX_AVAIL;
8151
8152 bp->rx_csum = 1;
34f80b04 8153
8154 /* make sure that the numbers are in the right granularity */
8155 bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
8156 bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
34f80b04 8157
8158 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8159 bp->current_interval = (poll ? poll : timer_interval);
8160
8161 init_timer(&bp->timer);
8162 bp->timer.expires = jiffies + bp->current_interval;
8163 bp->timer.data = (unsigned long) bp;
8164 bp->timer.function = bnx2x_timer;
8165
8166 return rc;
8167}
8168
a2fbb9ea 8169
8170/****************************************************************************
8171* General service functions
8172****************************************************************************/
a2fbb9ea 8173
bb2a0f7a 8174/* called with rtnl_lock */
8175static int bnx2x_open(struct net_device *dev)
8176{
8177 struct bnx2x *bp = netdev_priv(dev);
8178
8179 netif_carrier_off(dev);
8180
8181 bnx2x_set_power_state(bp, PCI_D0);
8182
8183 if (!bnx2x_reset_is_done(bp)) {
8184 do {
8185 /* Reset the MCP mailbox sequence if there is an
8186 * ongoing recovery
8187 */
8188 bp->fw_seq = 0;
8189
8190 /* If this is the first function to load and reset done
8191 * is still not cleared, a previous recovery flow was
8192 * probably never completed. We don't check the attention
8193 * state here because it may have already been cleared by
8194 * a "common" reset, but we shall proceed with "process kill" anyway.
8195 */
8196 if ((bnx2x_get_load_cnt(bp) == 0) &&
8197 bnx2x_trylock_hw_lock(bp,
8198 HW_LOCK_RESOURCE_RESERVED_08) &&
8199 (!bnx2x_leader_reset(bp))) {
8200 DP(NETIF_MSG_HW, "Recovered in open\n");
8201 break;
8202 }
8203
8204 bnx2x_set_power_state(bp, PCI_D3hot);
8205
8206 printk(KERN_ERR "%s: Recovery flow hasn't been properly"
8207 " completed yet. Try again later. If you still see this"
8208 " message after a few retries then power cycle is"
8209 " required.\n", bp->dev->name);
8210
8211 return -EAGAIN;
8212 } while (0);
8213 }
8214
8215 bp->recovery_state = BNX2X_RECOVERY_DONE;
8216
bb2a0f7a 8217 return bnx2x_nic_load(bp, LOAD_OPEN);
8218}
8219
bb2a0f7a 8220/* called with rtnl_lock */
8221static int bnx2x_close(struct net_device *dev)
8222{
8223 struct bnx2x *bp = netdev_priv(dev);
8224
8225 /* Unload the driver, release IRQs */
bb2a0f7a 8226 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
d3dbfee0 8227 bnx2x_set_power_state(bp, PCI_D3hot);
8228
8229 return 0;
8230}
8231
f5372251 8232/* called with netif_tx_lock from dev_mcast.c */
9f6c9258 8233void bnx2x_set_rx_mode(struct net_device *dev)
8234{
8235 struct bnx2x *bp = netdev_priv(dev);
8236 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
8237 int port = BP_PORT(bp);
8238
8239 if (bp->state != BNX2X_STATE_OPEN) {
8240 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
8241 return;
8242 }
8243
8244 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
8245
8246 if (dev->flags & IFF_PROMISC)
8247 rx_mode = BNX2X_RX_MODE_PROMISC;
8248
8249 else if ((dev->flags & IFF_ALLMULTI) ||
8250 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
8251 CHIP_IS_E1(bp)))
8252 rx_mode = BNX2X_RX_MODE_ALLMULTI;
8253
8254 else { /* some multicasts */
8255 if (CHIP_IS_E1(bp)) {
8256 int i, old, offset;
22bedad3 8257 struct netdev_hw_addr *ha;
34f80b04
EG
8258 struct mac_configuration_cmd *config =
8259 bnx2x_sp(bp, mcast_config);
8260
0ddf477b 8261 i = 0;
22bedad3 8262 netdev_for_each_mc_addr(ha, dev) {
8263 config->config_table[i].
8264 cam_entry.msb_mac_addr =
22bedad3 8265 swab16(*(u16 *)&ha->addr[0]);
8266 config->config_table[i].
8267 cam_entry.middle_mac_addr =
22bedad3 8268 swab16(*(u16 *)&ha->addr[2]);
34f80b04
EG
8269 config->config_table[i].
8270 cam_entry.lsb_mac_addr =
22bedad3 8271 swab16(*(u16 *)&ha->addr[4]);
8272 config->config_table[i].cam_entry.flags =
8273 cpu_to_le16(port);
8274 config->config_table[i].
8275 target_table_entry.flags = 0;
8276 config->config_table[i].target_table_entry.
8277 clients_bit_vector =
8278 cpu_to_le32(1 << BP_L_ID(bp));
8279 config->config_table[i].
8280 target_table_entry.vlan_id = 0;
8281
8282 DP(NETIF_MSG_IFUP,
8283 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
8284 config->config_table[i].
8285 cam_entry.msb_mac_addr,
8286 config->config_table[i].
8287 cam_entry.middle_mac_addr,
8288 config->config_table[i].
8289 cam_entry.lsb_mac_addr);
0ddf477b 8290 i++;
34f80b04 8291 }
8d9c5f34 8292 old = config->hdr.length;
8293 if (old > i) {
8294 for (; i < old; i++) {
8295 if (CAM_IS_INVALID(config->
8296 config_table[i])) {
af246401 8297 /* already invalidated */
8298 break;
8299 }
8300 /* invalidate */
8301 CAM_INVALIDATE(config->
8302 config_table[i]);
8303 }
8304 }
8305
8306 if (CHIP_REV_IS_SLOW(bp))
8307 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
8308 else
8309 offset = BNX2X_MAX_MULTICAST*(1 + port);
8310
8d9c5f34 8311 config->hdr.length = i;
34f80b04 8312 config->hdr.offset = offset;
8d9c5f34 8313 config->hdr.client_id = bp->fp->cl_id;
8314 config->hdr.reserved1 = 0;
8315
8316 bp->set_mac_pending++;
8317 smp_wmb();
8318
8319 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8320 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
8321 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
8322 0);
8323 } else { /* E1H */
8324 /* Accept one or more multicasts */
22bedad3 8325 struct netdev_hw_addr *ha;
8326 u32 mc_filter[MC_HASH_SIZE];
8327 u32 crc, bit, regidx;
8328 int i;
8329
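 /* E1H filters multicasts with a 256 bit hash: bits 31:24 of
 * the crc32c of each address select one bit spread over eight
 * 32 bit MC_HASH registers.
 */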
8330 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
8331
22bedad3 8332 netdev_for_each_mc_addr(ha, dev) {
7c510e4b 8333 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
22bedad3 8334 ha->addr);
34f80b04 8335
22bedad3 8336 crc = crc32c_le(0, ha->addr, ETH_ALEN);
8337 bit = (crc >> 24) & 0xff;
8338 regidx = bit >> 5;
8339 bit &= 0x1f;
8340 mc_filter[regidx] |= (1 << bit);
8341 }
8342
8343 for (i = 0; i < MC_HASH_SIZE; i++)
8344 REG_WR(bp, MC_HASH_OFFSET(bp, i),
8345 mc_filter[i]);
8346 }
8347 }
8348
8349 bp->rx_mode = rx_mode;
8350 bnx2x_set_storm_rx_mode(bp);
8351}
8352
a2fbb9ea 8353
c18487ee 8354/* called with rtnl_lock */
8355static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
8356 int devad, u16 addr)
a2fbb9ea 8357{
8358 struct bnx2x *bp = netdev_priv(netdev);
8359 u16 value;
8360 int rc;
8361 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 8362
8363 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
8364 prtad, devad, addr);
a2fbb9ea 8365
8366 if (prtad != bp->mdio.prtad) {
8367 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
8368 prtad, bp->mdio.prtad);
8369 return -EINVAL;
8370 }
8371
8372 /* The HW expects different devad if CL22 is used */
8373 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
c18487ee 8374
01cd4528
EG
8375 bnx2x_acquire_phy_lock(bp);
8376 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
8377 devad, addr, &value);
8378 bnx2x_release_phy_lock(bp);
8379 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
a2fbb9ea 8380
01cd4528
EG
8381 if (!rc)
8382 rc = value;
8383 return rc;
8384}
a2fbb9ea 8385
01cd4528
EG
8386/* called with rtnl_lock */
8387static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
8388 u16 addr, u16 value)
8389{
8390 struct bnx2x *bp = netdev_priv(netdev);
8391 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8392 int rc;
8393
8394 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
8395 " value 0x%x\n", prtad, devad, addr, value);
8396
8397 if (prtad != bp->mdio.prtad) {
8398 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
8399 prtad, bp->mdio.prtad);
8400 return -EINVAL;
a2fbb9ea
ET
8401 }
8402
01cd4528
EG
8403 /* The HW expects different devad if CL22 is used */
8404 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
a2fbb9ea 8405
01cd4528
EG
8406 bnx2x_acquire_phy_lock(bp);
8407 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
8408 devad, addr, value);
8409 bnx2x_release_phy_lock(bp);
8410 return rc;
8411}
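bnx2x_mdio_read() folds status and data into one return value: a negative result is an errno, anything else carries the register value in its low 16 bits, which is the convention the mdio45 helpers (such as mdio_mii_ioctl(), used below) rely on. A usage sketch of how a caller would split the two; read_phy_reg() is a hypothetical wrapper, not a driver function:

/* read_phy_reg() - hypothetical helper, not part of the driver */
static int read_phy_reg(struct net_device *ndev, int prtad, int devad,
			u16 reg, u16 *val)
{
	int rc = bnx2x_mdio_read(ndev, prtad, devad, reg);

	if (rc < 0)
		return rc;	/* -EINVAL on prtad mismatch, etc. */
	*val = (u16)rc;		/* success: value fits in 16 bits */
	return 0;
}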

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);

	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
	   mdio->phy_id, mdio->reg_num, mdio->val_in);

	if (!netif_running(dev))
		return -EAGAIN;

	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
};

static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&bp->pdev->dev,
			"Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev, "Cannot find second PCI device"
			" base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			dev_err(&bp->pdev->dev,
				"Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI Express capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
			dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
				" failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(&bp->pdev->dev,
			"System does not support DMA, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		dev_err(&bp->pdev->dev,
			"Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		dev_err(&bp->pdev->dev,
			"Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	/* Reset the load counter */
	bnx2x_clear_load_cnt(bp);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	bnx2x_set_ethtool_ops(dev);
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
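The DMA setup in bnx2x_init_dev() follows the usual prefer-wide pattern: try a 64-bit streaming mask first (recording success in USING_DAC_FLAG so NETIF_F_HIGHDMA can be advertised later), insist that the coherent mask match, and only then fall back to 32 bits, failing when neither works. A condensed sketch of that negotiation; set_dma_masks() is a hypothetical helper, not a driver function:

/* set_dma_masks() - hypothetical condensation of the logic above */
static int set_dma_masks(struct pci_dev *pdev, bool *using_dac)
{
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
		*using_dac = true;
		/* streaming and coherent masks must agree */
		return dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
	}
	*using_dac = false;
	return dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
}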

static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
						 int *width, int *speed)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;

	/* return value of 1=2.5GHz 2=5GHz */
	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}
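As a worked reading of the decode above, hypothetically for a x8 Gen2 link:

/* *width = 8, *speed = 2 -- which bnx2x_init_one() below prints as
 * "PCI-E x8 5GHz (Gen2)" */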

static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			dev_err(&bp->pdev->dev,
				"Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			dev_err(&bp->pdev->dev,
				"Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		dev_err(&bp->pdev->dev,
			"Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
			fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
			BCM_5710_FW_MAJOR_VERSION,
			BCM_5710_FW_MINOR_VERSION,
			BCM_5710_FW_REVISION_VERSION,
			BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}
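One caveat about the section check above: offset and len are untrusted 32-bit values, so the sum offset + len is computed in 32-bit arithmetic and can wrap, slipping past the comparison with firmware->size. A standalone sketch of the wrap-proof form of the same test (local names, not from the driver):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* True iff [offset, offset + len) lies inside a buffer of 'size'
 * bytes, with no possibility of integer wrap-around. */
static bool section_in_bounds(uint32_t offset, uint32_t len, size_t size)
{
	return offset <= size && len <= size - offset;
}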
static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/*
   Ops array is stored in the following format:
   {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j + 1]);
	}
}
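To make the record format in the comment above concrete: the first big-endian word 0x05001234 splits into op 0x05 and offset 0x001234, and the second word is the raw 32-bit operand. A standalone decode of one 8-byte record (userspace stand-in, hypothetical byte values):

#include <stdint.h>
#include <stdio.h>

struct raw_op { uint8_t op; uint32_t offset; uint32_t raw_data; };

int main(void)
{
	/* one record as it appears in the file: two big-endian words */
	const uint8_t rec[8] = { 0x05, 0x00, 0x12, 0x34,
				 0xde, 0xad, 0xbe, 0xef };
	struct raw_op out;
	uint32_t tmp = (uint32_t)rec[0] << 24 | rec[1] << 16 |
		       rec[2] << 8 | rec[3];

	out.op = (tmp >> 24) & 0xff;		/* 0x05 */
	out.offset = tmp & 0xffffff;		/* 0x001234 */
	out.raw_data = (uint32_t)rec[4] << 24 | rec[5] << 16 |
		       rec[6] << 8 | rec[7];	/* 0xdeadbeef */

	printf("op=0x%02x offset=0x%06x data=0x%08x\n",
	       out.op, out.offset, out.raw_data);
	return 0;
}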

static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *source = (const __be16 *)_source;
	u16 *target = (u16 *)_target;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}

#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
do { \
	u32 len = be32_to_cpu(fw_hdr->arr.len); \
	bp->arr = kmalloc(len, GFP_KERNEL); \
	if (!bp->arr) { \
		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
		goto lbl; \
	} \
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
	     (u8 *)bp->arr, len); \
} while (0)
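Because the macro pastes arr into both the bp field name and the header field name, one invocation allocates the target array and byte-swaps the matching firmware section into it. For reference, BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n) expands roughly to:

do {
	u32 len = be32_to_cpu(fw_hdr->init_data.len);
	bp->init_data = kmalloc(len, GFP_KERNEL);
	if (!bp->init_data) {
		pr_err("Failed to allocate %d bytes for init_data\n", len);
		goto request_firmware_exit;
	}
	be32_to_cpu_n(bp->firmware->data +
		      be32_to_cpu(fw_hdr->init_data.offset),
		      (u8 *)bp->init_data, len);
} while (0);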

static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else if (CHIP_IS_E1H(bp))
		fw_file_name = FW_FILE_NAME_E1H;
	else {
		dev_err(dev, "Unsupported chip revision\n");
		return -EINVAL;
	}

	dev_info(dev, "Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}


static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		dev_err(&pdev->dev, "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msg_enable = debug;

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		dev_err(&pdev->dev, "Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	pr_cont("node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* Make sure RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->reset_task);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);
	netif_carrier_off(bp->dev);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. "
				"Try again later\n");
		return;
	}

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};
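Taken together, the three callbacks above implement the standard PCI error-recovery (AER/EEH) handshake; assuming the usual flow, the PCI core drives them in this order on a recoverable error:

/*
 *   error_detected()  detach the netdev, unload NIC state,
 *                     return PCI_ERS_RESULT_NEED_RESET
 *   slot_reset()      re-enable the device, restore config space,
 *                     return PCI_ERS_RESULT_RECOVERED
 *   resume()          re-read shmem/MCP state, reload the NIC,
 *                     reattach the netdev
 */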

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int ret;

	pr_info("%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		pr_err("Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);

#ifdef BCM_CNIC

/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
	     bp->cnic_spq_pending++) {

		if (!bp->cnic_kwq_pending)
			break;

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		bp->cnic_kwq_pending--;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}

static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.mac_config_addr.hi,
		   spe->data.mac_config_addr.lo,
		   bp->cnic_kwq_pending);

		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}
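The kwq handled above is a bounded ring of eth_spe slots: cnic_kwq_prod and cnic_kwq_cons walk the array and wrap back to the base on reaching cnic_kwq_last, with cnic_kwq_pending as the fill count. A minimal standalone model of that wrap logic (simplified types, no locking; RING_SLOTS is a stand-in for MAX_SP_DESC_CNT):

#include <stdio.h>

#define RING_SLOTS 8	/* stand-in for MAX_SP_DESC_CNT */

struct ring {
	int slots[RING_SLOTS];
	int *prod, *cons, *last;
	int pending;
};

static void ring_init(struct ring *r)
{
	r->prod = r->cons = r->slots;
	r->last = r->slots + RING_SLOTS - 1;
	r->pending = 0;
}

static int ring_push(struct ring *r, int v)
{
	if (r->pending == RING_SLOTS)
		return -1;				/* full */
	*r->prod = v;
	r->pending++;
	r->prod = (r->prod == r->last) ? r->slots : r->prod + 1;
	return 0;
}

static int ring_pop(struct ring *r, int *v)
{
	if (!r->pending)
		return -1;				/* empty */
	*v = *r->cons;
	r->pending--;
	r->cons = (r->cons == r->last) ? r->slots : r->cons + 1;
	return 0;
}

int main(void)
{
	struct ring r;
	int v;

	ring_init(&r);
	for (int i = 0; i < 10; i++)
		if (ring_push(&r, i))
			printf("push %d refused (ring full)\n", i);
	while (!ring_pop(&r, &v))
		printf("popped %d\n", v);
	return 0;
}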

static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = bp->cnic_ops;
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}

static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}

/*
 * for commands that have no data
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
	struct cnic_ctl_info ctl = {0};

	ctl.cmd = cmd;

	return bnx2x_cnic_ctl_send(bp, &ctl);
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
	struct cnic_ctl_info ctl;

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 1);
}

static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_COMPLETION_CMD: {
		int count = ctl->data.comp.comp_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask |= (1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask &= ~(1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}

void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	cp->irq_arr[0].status_blk = bp->cnic_sb;
	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;

	cp->num_irq = 2;
}

static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}

static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}
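bnx2x_register_cnic() and bnx2x_unregister_cnic() follow the classic RCU publish/unpublish discipline: readers such as bnx2x_cnic_ctl_send_bh() above only touch cnic_ops under rcu_read_lock(), which is what makes the teardown sequence safe. In summary:

/*
 *   rcu_assign_pointer(bp->cnic_ops, NULL);   unpublish: new readers
 *                                             observe NULL
 *   synchronize_rcu();                        wait out readers still
 *                                             holding the old pointer
 *   kfree(bp->cnic_kwq);                      nothing can reach the
 *                                             queue any more; free it
 */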

struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = BCM_CNIC_CID_START;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);

#endif /* BCM_CNIC */