/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION	"1.52.1-8"
#define DRV_MODULE_RELDATE	"2010/04/01"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x-e1h-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
			     " (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

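/* Indirect read through the same PCI config window: program the GRC
 * address register, read the data register, then park the window back
 * at PCICFG_VENDOR_ID_OFFSET so stray config cycles stay harmless.
 */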
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

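/* bnx2x_write_dmae - copy a host-memory buffer into device (GRC)
 * address space via the DMAE block.  Falls back to indirect register
 * writes while DMAE is not yet initialized; completion is detected by
 * polling the wb_comp word the engine writes back.
 */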
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

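/* bnx2x_read_dmae - mirror image of bnx2x_write_dmae: copy len32 dwords
 * from device address space into the slowpath wb_data buffer.
 */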
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	/* len is in dwords; the byte offset therefore advances by
	   dmae_wr_max * 4 per chunk */
	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

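/* bnx2x_mc_assert - scan the assert lists of the four STORM processors
 * (X/T/C/U) and print every entry the firmware has recorded.  Returns
 * the number of asserts found.
 */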
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

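/* bnx2x_fw_dump - dump the bootcode (MCP) trace from the scratchpad.
 * The mark word at offset 0xf104 gives the current wrap point of the
 * cyclic trace buffer, so the buffer is printed in two chunks: from
 * the mark to the end, then from the start back up to the mark.
 */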
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - cannot dump\n");
		return;
	}

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}

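/* bnx2x_panic_dump - print driver state on a fatal error: default and
 * per-queue status block indices, Rx/Tx producer and consumer values,
 * and the raw ring entries around each consumer, followed by the STORM
 * asserts and the MCP trace.
 */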
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
		  "  spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
			  "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
			  "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
			  "  fp_u_idx(%x)  *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
			  "  tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

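/* bnx2x_int_enable - configure the HC (host coalescing) block for the
 * interrupt mode in use (MSI-X, MSI or INTx) and, on E1H, program the
 * leading/trailing edge registers so attentions reach this function.
 */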
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

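/* bnx2x_int_disable_sync - stop interrupt handling and wait until all
 * ISRs and the slowpath task have finished.  With disable_hw set, the
 * HC is also told to stop generating interrupts.
 */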
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

/* Return true if succeeded to acquire the lock */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		/* returning -EINVAL here would read as true to callers */
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
	return false;
}

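/* bnx2x_ack_sb - acknowledge a status block index to the IGU; op and
 * update encode the IGU command (e.g. whether to re-enable the
 * interrupt for this status block).
 */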
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;

	barrier(); /* status block is written to by the chip */
	fp->fp_c_idx = fpsb->c_status_block.status_block_index;
	fp->fp_u_idx = fpsb->u_status_block.status_block_index;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

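/* bnx2x_tx_avail - number of free Tx BDs.  The NUM_TX_RINGS "next page"
 * BDs are counted as used so the result never spills onto next-page
 * entries.
 */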
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 hw_cons;

	/* Tell compiler that status block fields can change */
	barrier();
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	return hw_cons != fp->tx_pkt_cons;
}

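/* bnx2x_tx_int - Tx completion processing: walk the completed packets,
 * free their BDs and skbs, then wake the netdev queue if it was stopped
 * and enough ring space has become available.
 */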
static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent reenabling the queue
		 * while it's empty. This could happen if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

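/* bnx2x_sp_event - handle a slowpath completion (ramrod) reported on a
 * fastpath ring: advance the fp/bp state machines accordingly.
 */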
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

#ifdef BCM_CNIC
	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		break;
#endif

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	dma_sync_single_for_device(&bp->pdev->dev,
				   dma_unmap_addr(cons_rx_buf, mapping),
				   RX_COPY_THRESH, DMA_FROM_DEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	dma_unmap_addr_set(prod_rx_buf, mapping,
			   dma_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

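/* bnx2x_update_sge_prod - given the SGEs consumed by one TPA
 * aggregation, clear their bits in the SGE mask and advance rx_sge_prod
 * over every fully-consumed 64-entry mask element.
 */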
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

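/* bnx2x_tpa_start - a TPA aggregation begins: park the partially-filled
 * skb from the Rx ring in the per-queue TPA pool and put the pool's
 * spare skb on the ring in its place.
 */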
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, DMA_FROM_DEVICE);
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

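/* bnx2x_fill_frag_skb - attach the SGE pages of a finished aggregation
 * to the skb as page fragments, allocating replacement pages as it goes.
 */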
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

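/* bnx2x_tpa_stop - a TPA aggregation ended: fix the IP checksum, attach
 * the SGE fragments and hand the skb to the stack; on any allocation
 * failure the packet is dropped but the pool buffer is kept.
 */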
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, DMA_FROM_DEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take the VLAN tag into account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_gro_receive(&fp->napi, bp->vlgrp,
						 le16_to_cpu(cqe->fast_path_cqe.
							     vlan_tag), skb);
			else
#endif
				napi_gro_receive(&fp->napi, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

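/* bnx2x_update_rx_prod - publish the new BD, CQE and SGE producers to
 * the USTORM Rx producers structure in internal memory.
 */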
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

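/* bnx2x_rx_int - the NAPI Rx handler: walk the completion queue up to
 * the budget, dispatching slowpath CQEs, TPA start/stop events and
 * regular packets, then update the ring producers.
 */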
a2fbb9ea
ET
1545static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1546{
1547 struct bnx2x *bp = fp->bp;
34f80b04 1548 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
a2fbb9ea
ET
1549 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1550 int rx_pkt = 0;
1551
1552#ifdef BNX2X_STOP_ON_ERROR
1553 if (unlikely(bp->panic))
1554 return 0;
1555#endif
1556
34f80b04
EG
1557 /* CQ "next element" is of the size of the regular element,
1558 that's why it's ok here */
a2fbb9ea
ET
1559 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1560 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1561 hw_comp_cons++;
1562
1563 bd_cons = fp->rx_bd_cons;
1564 bd_prod = fp->rx_bd_prod;
34f80b04 1565 bd_prod_fw = bd_prod;
a2fbb9ea
ET
1566 sw_comp_cons = fp->rx_comp_cons;
1567 sw_comp_prod = fp->rx_comp_prod;
1568
1569 /* Memory barrier necessary as speculative reads of the rx
1570 * buffer can be ahead of the index in the status block
1571 */
1572 rmb();
1573
1574 DP(NETIF_MSG_RX_STATUS,
1575 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
0626b899 1576 fp->index, hw_comp_cons, sw_comp_cons);
a2fbb9ea
ET
1577
1578 while (sw_comp_cons != hw_comp_cons) {
34f80b04 1579 struct sw_rx_bd *rx_buf = NULL;
a2fbb9ea
ET
1580 struct sk_buff *skb;
1581 union eth_rx_cqe *cqe;
34f80b04
EG
1582 u8 cqe_fp_flags;
1583 u16 len, pad;
a2fbb9ea
ET
1584
1585 comp_ring_cons = RCQ_BD(sw_comp_cons);
1586 bd_prod = RX_BD(bd_prod);
1587 bd_cons = RX_BD(bd_cons);
1588
619e7a66
EG
1589 /* Prefetch the page containing the BD descriptor
1590 at producer's index. It will be needed when new skb is
1591 allocated */
1592 prefetch((void *)(PAGE_ALIGN((unsigned long)
1593 (&fp->rx_desc_ring[bd_prod])) -
1594 PAGE_SIZE + 1));
1595
a2fbb9ea 1596 cqe = &fp->rx_comp_ring[comp_ring_cons];
34f80b04 1597 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
a2fbb9ea 1598
a2fbb9ea 1599 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
34f80b04
EG
1600 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1601 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
68d59484 1602 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
34f80b04
EG
1603 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1604 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
a2fbb9ea
ET
1605
1606 /* is this a slowpath msg? */
34f80b04 1607 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
a2fbb9ea
ET
1608 bnx2x_sp_event(fp, cqe);
1609 goto next_cqe;
1610
1611 /* this is an rx packet */
1612 } else {
1613 rx_buf = &fp->rx_buf_ring[bd_cons];
1614 skb = rx_buf->skb;
54b9ddaa
VZ
1615 prefetch(skb);
1616 prefetch((u8 *)skb + 256);
a2fbb9ea
ET
1617 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1618 pad = cqe->fast_path_cqe.placement_offset;
1619
7a9b2557
VZ
1620 /* If CQE is marked both TPA_START and TPA_END
1621 it is a non-TPA CQE */
1622 if ((!fp->disable_tpa) &&
1623 (TPA_TYPE(cqe_fp_flags) !=
1624 (TPA_TYPE_START | TPA_TYPE_END))) {
3196a88a 1625 u16 queue = cqe->fast_path_cqe.queue_index;
7a9b2557
VZ
1626
1627 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1628 DP(NETIF_MSG_RX_STATUS,
1629 "calling tpa_start on queue %d\n",
1630 queue);
1631
1632 bnx2x_tpa_start(fp, queue, skb,
1633 bd_cons, bd_prod);
1634 goto next_rx;
1635 }
1636
1637 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1638 DP(NETIF_MSG_RX_STATUS,
1639 "calling tpa_stop on queue %d\n",
1640 queue);
1641
1642 if (!BNX2X_RX_SUM_FIX(cqe))
1643 BNX2X_ERR("STOP on none TCP "
1644 "data\n");
1645
1646 /* This is a size of the linear data
1647 on this skb */
1648 len = le16_to_cpu(cqe->fast_path_cqe.
1649 len_on_bd);
1650 bnx2x_tpa_stop(bp, fp, queue, pad,
1651 len, cqe, comp_ring_cons);
1652#ifdef BNX2X_STOP_ON_ERROR
1653 if (bp->panic)
17cb4006 1654 return 0;
7a9b2557
VZ
1655#endif
1656
1657 bnx2x_update_sge_prod(fp,
1658 &cqe->fast_path_cqe);
1659 goto next_cqe;
1660 }
1661 }
1662
1a983142
FT
1663 dma_sync_single_for_device(&bp->pdev->dev,
1664 dma_unmap_addr(rx_buf, mapping),
1665 pad + RX_COPY_THRESH,
1666 DMA_FROM_DEVICE);
a2fbb9ea
ET
1667 prefetch(skb);
1668 prefetch(((char *)(skb)) + 128);
1669
1670 /* is this an error packet? */
34f80b04 1671 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
a2fbb9ea 1672 DP(NETIF_MSG_RX_ERR,
34f80b04
EG
1673 "ERROR flags %x rx packet %u\n",
1674 cqe_fp_flags, sw_comp_cons);
de832a55 1675 fp->eth_q_stats.rx_err_discard_pkt++;
a2fbb9ea
ET
1676 goto reuse_rx;
1677 }
1678
1679 /* Since we don't have a jumbo ring
1680 * copy small packets if mtu > 1500
1681 */
1682 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1683 (len <= RX_COPY_THRESH)) {
1684 struct sk_buff *new_skb;
1685
1686 new_skb = netdev_alloc_skb(bp->dev,
1687 len + pad);
1688 if (new_skb == NULL) {
1689 DP(NETIF_MSG_RX_ERR,
34f80b04 1690 "ERROR packet dropped "
a2fbb9ea 1691 "because of alloc failure\n");
de832a55 1692 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
1693 goto reuse_rx;
1694 }
1695
1696 /* aligned copy */
1697 skb_copy_from_linear_data_offset(skb, pad,
1698 new_skb->data + pad, len);
1699 skb_reserve(new_skb, pad);
1700 skb_put(new_skb, len);
1701
1702 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1703
1704 skb = new_skb;
1705
a119a069
EG
1706 } else
1707 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1a983142
FT
1708 dma_unmap_single(&bp->pdev->dev,
1709 dma_unmap_addr(rx_buf, mapping),
437cf2f1 1710 bp->rx_buf_size,
1a983142 1711 DMA_FROM_DEVICE);
a2fbb9ea
ET
1712 skb_reserve(skb, pad);
1713 skb_put(skb, len);
1714
1715 } else {
1716 DP(NETIF_MSG_RX_ERR,
34f80b04 1717 "ERROR packet dropped because "
a2fbb9ea 1718 "of alloc failure\n");
de832a55 1719 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
1720reuse_rx:
1721 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1722 goto next_rx;
1723 }
1724
1725 skb->protocol = eth_type_trans(skb, bp->dev);
1726
1727 skb->ip_summed = CHECKSUM_NONE;
66e855f3 1728 if (bp->rx_csum) {
1adcd8be
EG
1729 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1730 skb->ip_summed = CHECKSUM_UNNECESSARY;
66e855f3 1731 else
de832a55 1732 fp->eth_q_stats.hw_csum_err++;
66e855f3 1733 }
a2fbb9ea
ET
1734 }
1735
748e5439 1736 skb_record_rx_queue(skb, fp->index);
ab6ad5a4 1737
a2fbb9ea 1738#ifdef BCM_VLAN
0c6671b0 1739 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
34f80b04
EG
1740 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1741 PARSING_FLAGS_VLAN))
4fd89b7a
DK
1742 vlan_gro_receive(&fp->napi, bp->vlgrp,
1743 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
a2fbb9ea
ET
1744 else
1745#endif
4fd89b7a 1746 napi_gro_receive(&fp->napi, skb);
a2fbb9ea 1747
a2fbb9ea
ET
1748
1749next_rx:
1750 rx_buf->skb = NULL;
1751
1752 bd_cons = NEXT_RX_IDX(bd_cons);
1753 bd_prod = NEXT_RX_IDX(bd_prod);
34f80b04
EG
1754 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1755 rx_pkt++;
a2fbb9ea
ET
1756next_cqe:
1757 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1758 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
a2fbb9ea 1759
34f80b04 1760 if (rx_pkt == budget)
a2fbb9ea
ET
1761 break;
1762 } /* while */
1763
1764 fp->rx_bd_cons = bd_cons;
34f80b04 1765 fp->rx_bd_prod = bd_prod_fw;
a2fbb9ea
ET
1766 fp->rx_comp_cons = sw_comp_cons;
1767 fp->rx_comp_prod = sw_comp_prod;
1768
7a9b2557
VZ
1769 /* Update producers */
1770 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1771 fp->rx_sge_prod);
a2fbb9ea
ET
1772
1773 fp->rx_pkt += rx_pkt;
1774 fp->rx_calls++;
1775
1776 return rx_pkt;
1777}
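/* Note on the receive loop above: each CQE is either a slowpath event
 * (handed to bnx2x_sp_event), a TPA start/end marker for an aggregated
 * frame, or a regular packet, which is copied into a fresh skb when it
 * is small and the MTU is jumbo, or unmapped and passed up via
 * (vlan_)gro_receive otherwise.
 */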
1778
1779static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1780{
1781 struct bnx2x_fastpath *fp = fp_cookie;
1782 struct bnx2x *bp = fp->bp;
a2fbb9ea 1783
da5a662a
VZ
1784 /* Return here if interrupt is disabled */
1785 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1786 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1787 return IRQ_HANDLED;
1788 }
1789
34f80b04 1790 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
ca00392c 1791 fp->index, fp->sb_id);
0626b899 1792 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
1793
1794#ifdef BNX2X_STOP_ON_ERROR
1795 if (unlikely(bp->panic))
1796 return IRQ_HANDLED;
1797#endif
ca00392c 1798
54b9ddaa
VZ
1799 /* Handle Rx and Tx according to MSI-X vector */
1800 prefetch(fp->rx_cons_sb);
1801 prefetch(fp->tx_cons_sb);
1802 prefetch(&fp->status_blk->u_status_block.status_block_index);
1803 prefetch(&fp->status_blk->c_status_block.status_block_index);
1804 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
34f80b04 1805
a2fbb9ea
ET
1806 return IRQ_HANDLED;
1807}
1808
1809static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1810{
555f6c78 1811 struct bnx2x *bp = netdev_priv(dev_instance);
a2fbb9ea 1812 u16 status = bnx2x_ack_int(bp);
34f80b04 1813 u16 mask;
ca00392c 1814 int i;
a2fbb9ea 1815
34f80b04 1816 /* Return here if interrupt is shared and it's not for us */
a2fbb9ea
ET
1817 if (unlikely(status == 0)) {
1818 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1819 return IRQ_NONE;
1820 }
f5372251 1821 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
a2fbb9ea 1822
34f80b04 1823 /* Return here if interrupt is disabled */
a2fbb9ea
ET
1824 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1825 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1826 return IRQ_HANDLED;
1827 }
1828
3196a88a
EG
1829#ifdef BNX2X_STOP_ON_ERROR
1830 if (unlikely(bp->panic))
1831 return IRQ_HANDLED;
1832#endif
1833
ca00392c
EG
1834 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1835 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 1836
ca00392c
EG
1837 mask = 0x2 << fp->sb_id;
1838 if (status & mask) {
54b9ddaa
VZ
1839 /* Handle Rx and Tx according to SB id */
1840 prefetch(fp->rx_cons_sb);
1841 prefetch(&fp->status_blk->u_status_block.
1842 status_block_index);
1843 prefetch(fp->tx_cons_sb);
1844 prefetch(&fp->status_blk->c_status_block.
1845 status_block_index);
1846 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
ca00392c
EG
1847 status &= ~mask;
1848 }
a2fbb9ea
ET
1849 }
1850
993ac7b5
MC
1851#ifdef BCM_CNIC
1852 mask = 0x2 << CNIC_SB_ID(bp);
1853 if (status & (mask | 0x1)) {
1854 struct cnic_ops *c_ops = NULL;
1855
1856 rcu_read_lock();
1857 c_ops = rcu_dereference(bp->cnic_ops);
1858 if (c_ops)
1859 c_ops->cnic_handler(bp->cnic_data, NULL);
1860 rcu_read_unlock();
1861
1862 status &= ~mask;
1863 }
1864#endif
a2fbb9ea 1865
34f80b04 1866 if (unlikely(status & 0x1)) {
1cf167f2 1867 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
1868
1869 status &= ~0x1;
1870 if (!status)
1871 return IRQ_HANDLED;
1872 }
1873
34f80b04
EG
1874 if (status)
1875 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1876 status);
a2fbb9ea 1877
c18487ee 1878 return IRQ_HANDLED;
a2fbb9ea
ET
1879}
1880
c18487ee 1881/* end of fast path */
a2fbb9ea 1882
bb2a0f7a 1883static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1884
c18487ee
YR
1885/* Link */
1886
1887/*
1888 * General service functions
1889 */
a2fbb9ea 1890
4a37fb66 1891static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1892{
1893 u32 lock_status;
1894 u32 resource_bit = (1 << resource);
4a37fb66
YG
1895 int func = BP_FUNC(bp);
1896 u32 hw_lock_control_reg;
c18487ee 1897 int cnt;
a2fbb9ea 1898
c18487ee
YR
1899 /* Validating that the resource is within range */
1900 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1901 DP(NETIF_MSG_HW,
1902 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1903 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1904 return -EINVAL;
1905 }
a2fbb9ea 1906
4a37fb66
YG
1907 if (func <= 5) {
1908 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1909 } else {
1910 hw_lock_control_reg =
1911 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1912 }
1913
c18487ee 1914 /* Validating that the resource is not already taken */
4a37fb66 1915 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1916 if (lock_status & resource_bit) {
1917 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1918 lock_status, resource_bit);
1919 return -EEXIST;
1920 }
a2fbb9ea 1921
46230476
EG
1922 /* Try for 5 seconds, every 5 ms */
1923 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1924 /* Try to acquire the lock */
4a37fb66
YG
1925 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1926 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1927 if (lock_status & resource_bit)
1928 return 0;
a2fbb9ea 1929
c18487ee 1930 msleep(5);
a2fbb9ea 1931 }
c18487ee
YR
1932 DP(NETIF_MSG_HW, "Timeout\n");
1933 return -EAGAIN;
1934}
a2fbb9ea 1935
4a37fb66 1936static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1937{
1938 u32 lock_status;
1939 u32 resource_bit = (1 << resource);
4a37fb66
YG
1940 int func = BP_FUNC(bp);
1941 u32 hw_lock_control_reg;
a2fbb9ea 1942
72fd0718
VZ
1943 DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1944
c18487ee
YR
1945 /* Validating that the resource is within range */
1946 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1947 DP(NETIF_MSG_HW,
1948 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1949 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1950 return -EINVAL;
1951 }
1952
4a37fb66
YG
1953 if (func <= 5) {
1954 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1955 } else {
1956 hw_lock_control_reg =
1957 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1958 }
1959
c18487ee 1960 /* Validating that the resource is currently taken */
4a37fb66 1961 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1962 if (!(lock_status & resource_bit)) {
1963 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1964 lock_status, resource_bit);
1965 return -EFAULT;
a2fbb9ea
ET
1966 }
1967
4a37fb66 1968 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1969 return 0;
1970}
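/* A sketch of the intended usage of the two helpers above, assuming the
 * set register at hw_lock_control_reg + 4 latches the resource bit only
 * when the lock is free (hence the read-back check):
 *
 * bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 * ... touch the shared resource ...
 * bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 */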
1971
1972/* HW Lock for shared dual port PHYs */
4a37fb66 1973static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee 1974{
34f80b04 1975 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1976
46c6a674
EG
1977 if (bp->port.need_hw_lock)
1978 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
c18487ee 1979}
a2fbb9ea 1980
4a37fb66 1981static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee 1982{
46c6a674
EG
1983 if (bp->port.need_hw_lock)
1984 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
a2fbb9ea 1985
34f80b04 1986 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1987}
a2fbb9ea 1988
4acac6a5
EG
1989int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1990{
1991 /* The GPIO should be swapped if swap register is set and active */
1992 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1993 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1994 int gpio_shift = gpio_num +
1995 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1996 u32 gpio_mask = (1 << gpio_shift);
1997 u32 gpio_reg;
1998 int value;
1999
2000 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2001 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2002 return -EINVAL;
2003 }
2004
2005 /* read GPIO value */
2006 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2007
2008 /* get the requested pin value */
2009 if ((gpio_reg & gpio_mask) == gpio_mask)
2010 value = 1;
2011 else
2012 value = 0;
2013
2014 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
2015
2016 return value;
2017}
2018
17de50b7 2019int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
2020{
2021 /* The GPIO should be swapped if swap register is set and active */
2022 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 2023 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
2024 int gpio_shift = gpio_num +
2025 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2026 u32 gpio_mask = (1 << gpio_shift);
2027 u32 gpio_reg;
a2fbb9ea 2028
c18487ee
YR
2029 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2030 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2031 return -EINVAL;
2032 }
a2fbb9ea 2033
4a37fb66 2034 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
2035 /* read GPIO and mask except the float bits */
2036 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 2037
c18487ee
YR
2038 switch (mode) {
2039 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2040 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
2041 gpio_num, gpio_shift);
2042 /* clear FLOAT and set CLR */
2043 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2044 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2045 break;
a2fbb9ea 2046
c18487ee
YR
2047 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2048 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
2049 gpio_num, gpio_shift);
2050 /* clear FLOAT and set SET */
2051 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2052 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2053 break;
a2fbb9ea 2054
17de50b7 2055 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
2056 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2057 gpio_num, gpio_shift);
2058 /* set FLOAT */
2059 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2060 break;
a2fbb9ea 2061
c18487ee
YR
2062 default:
2063 break;
a2fbb9ea
ET
2064 }
2065
c18487ee 2066 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 2067 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 2068
c18487ee 2069 return 0;
a2fbb9ea
ET
2070}
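/* Example (as used by the fan failure handler further down): drive the
 * PHY reset pin low on the given port:
 *
 * bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 * MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 */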
2071
4acac6a5
EG
2072int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2073{
2074 /* The GPIO should be swapped if swap register is set and active */
2075 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2076 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2077 int gpio_shift = gpio_num +
2078 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2079 u32 gpio_mask = (1 << gpio_shift);
2080 u32 gpio_reg;
2081
2082 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2083 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2084 return -EINVAL;
2085 }
2086
2087 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2088 /* read GPIO int */
2089 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2090
2091 switch (mode) {
2092 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2093 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2094 "output low\n", gpio_num, gpio_shift);
2095 /* clear SET and set CLR */
2096 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2097 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2098 break;
2099
2100 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2101 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2102 "output high\n", gpio_num, gpio_shift);
2103 /* clear CLR and set SET */
2104 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2105 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2106 break;
2107
2108 default:
2109 break;
2110 }
2111
2112 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2113 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2114
2115 return 0;
2116}
2117
c18487ee 2118static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 2119{
c18487ee
YR
2120 u32 spio_mask = (1 << spio_num);
2121 u32 spio_reg;
a2fbb9ea 2122
c18487ee
YR
2123 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2124 (spio_num > MISC_REGISTERS_SPIO_7)) {
2125 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2126 return -EINVAL;
a2fbb9ea
ET
2127 }
2128
4a37fb66 2129 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
2130 /* read SPIO and mask except the float bits */
2131 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 2132
c18487ee 2133 switch (mode) {
6378c025 2134 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
2135 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2136 /* clear FLOAT and set CLR */
2137 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2138 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2139 break;
a2fbb9ea 2140
6378c025 2141 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
2142 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2143 /* clear FLOAT and set SET */
2144 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2145 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2146 break;
a2fbb9ea 2147
c18487ee
YR
2148 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2149 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2150 /* set FLOAT */
2151 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2152 break;
a2fbb9ea 2153
c18487ee
YR
2154 default:
2155 break;
a2fbb9ea
ET
2156 }
2157
c18487ee 2158 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 2159 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 2160
a2fbb9ea
ET
2161 return 0;
2162}
2163
c18487ee 2164static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 2165{
ad33ea3a
EG
2166 switch (bp->link_vars.ieee_fc &
2167 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 2168 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 2169 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2170 ADVERTISED_Pause);
2171 break;
356e2385 2172
c18487ee 2173 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 2174 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
2175 ADVERTISED_Pause);
2176 break;
356e2385 2177
c18487ee 2178 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 2179 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee 2180 break;
356e2385 2181
c18487ee 2182 default:
34f80b04 2183 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2184 ADVERTISED_Pause);
2185 break;
2186 }
2187}
f1410647 2188
c18487ee
YR
2189static void bnx2x_link_report(struct bnx2x *bp)
2190{
f34d28ea 2191 if (bp->flags & MF_FUNC_DIS) {
2691d51d 2192 netif_carrier_off(bp->dev);
7995c64e 2193 netdev_err(bp->dev, "NIC Link is Down\n");
2691d51d
EG
2194 return;
2195 }
2196
c18487ee 2197 if (bp->link_vars.link_up) {
35c5f8fe
EG
2198 u16 line_speed;
2199
c18487ee
YR
2200 if (bp->state == BNX2X_STATE_OPEN)
2201 netif_carrier_on(bp->dev);
7995c64e 2202 netdev_info(bp->dev, "NIC Link is Up, ");
f1410647 2203
35c5f8fe
EG
2204 line_speed = bp->link_vars.line_speed;
2205 if (IS_E1HMF(bp)) {
2206 u16 vn_max_rate;
2207
2208 vn_max_rate =
2209 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2210 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2211 if (vn_max_rate < line_speed)
2212 line_speed = vn_max_rate;
2213 }
7995c64e 2214 pr_cont("%d Mbps ", line_speed);
f1410647 2215
c18487ee 2216 if (bp->link_vars.duplex == DUPLEX_FULL)
7995c64e 2217 pr_cont("full duplex");
c18487ee 2218 else
7995c64e 2219 pr_cont("half duplex");
f1410647 2220
c0700f90
DM
2221 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2222 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
7995c64e 2223 pr_cont(", receive ");
356e2385
EG
2224 if (bp->link_vars.flow_ctrl &
2225 BNX2X_FLOW_CTRL_TX)
7995c64e 2226 pr_cont("& transmit ");
c18487ee 2227 } else {
7995c64e 2228 pr_cont(", transmit ");
c18487ee 2229 }
7995c64e 2230 pr_cont("flow control ON");
c18487ee 2231 }
7995c64e 2232 pr_cont("\n");
f1410647 2233
c18487ee
YR
2234 } else { /* link_down */
2235 netif_carrier_off(bp->dev);
7995c64e 2236 netdev_err(bp->dev, "NIC Link is Down\n");
f1410647 2237 }
c18487ee
YR
2238}
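/* The pr_cont() chain above emits one line per link change, e.g.
 * "eth0: NIC Link is Up, 10000 Mbps full duplex, receive & transmit
 * flow control ON" (interface name and numbers depend on the setup).
 */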
2239
b5bf9068 2240static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 2241{
19680c48
EG
2242 if (!BP_NOMCP(bp)) {
2243 u8 rc;
a2fbb9ea 2244
19680c48 2245 /* Initialize link parameters structure variables */
8c99e7b0
YR
2246 /* It is recommended to turn off RX FC for jumbo frames
2247 for better performance */
0c593270 2248 if (bp->dev->mtu > 5000)
c0700f90 2249 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 2250 else
c0700f90 2251 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 2252
4a37fb66 2253 bnx2x_acquire_phy_lock(bp);
b5bf9068
EG
2254
2255 if (load_mode == LOAD_DIAG)
2256 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2257
19680c48 2258 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 2259
4a37fb66 2260 bnx2x_release_phy_lock(bp);
a2fbb9ea 2261
3c96c68b
EG
2262 bnx2x_calc_fc_adv(bp);
2263
b5bf9068
EG
2264 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2265 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 2266 bnx2x_link_report(bp);
b5bf9068 2267 }
34f80b04 2268
19680c48
EG
2269 return rc;
2270 }
f5372251 2271 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
19680c48 2272 return -EINVAL;
a2fbb9ea
ET
2273}
2274
c18487ee 2275static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2276{
19680c48 2277 if (!BP_NOMCP(bp)) {
4a37fb66 2278 bnx2x_acquire_phy_lock(bp);
19680c48 2279 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2280 bnx2x_release_phy_lock(bp);
a2fbb9ea 2281
19680c48
EG
2282 bnx2x_calc_fc_adv(bp);
2283 } else
f5372251 2284 BNX2X_ERR("Bootcode is missing - can not set link\n");
c18487ee 2285}
a2fbb9ea 2286
c18487ee
YR
2287static void bnx2x__link_reset(struct bnx2x *bp)
2288{
19680c48 2289 if (!BP_NOMCP(bp)) {
4a37fb66 2290 bnx2x_acquire_phy_lock(bp);
589abe3a 2291 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 2292 bnx2x_release_phy_lock(bp);
19680c48 2293 } else
f5372251 2294 BNX2X_ERR("Bootcode is missing - can not reset link\n");
c18487ee 2295}
a2fbb9ea 2296
c18487ee
YR
2297static u8 bnx2x_link_test(struct bnx2x *bp)
2298{
2145a920 2299 u8 rc = 0;
a2fbb9ea 2300
2145a920
VZ
2301 if (!BP_NOMCP(bp)) {
2302 bnx2x_acquire_phy_lock(bp);
2303 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2304 bnx2x_release_phy_lock(bp);
2305 } else
2306 BNX2X_ERR("Bootcode is missing - can not test link\n");
a2fbb9ea 2307
c18487ee
YR
2308 return rc;
2309}
a2fbb9ea 2310
8a1c38d1 2311static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 2312{
8a1c38d1
EG
2313 u32 r_param = bp->link_vars.line_speed / 8;
2314 u32 fair_periodic_timeout_usec;
2315 u32 t_fair;
34f80b04 2316
8a1c38d1
EG
2317 memset(&(bp->cmng.rs_vars), 0,
2318 sizeof(struct rate_shaping_vars_per_port));
2319 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 2320
8a1c38d1
EG
2321 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2322 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 2323
8a1c38d1
EG
2324 /* this is the threshold below which no timer arming will occur;
2325 the 1.25 coefficient makes the threshold a little bigger
2326 than the real time, to compensate for timer inaccuracy */
2327 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
2328 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2329
8a1c38d1
EG
2330 /* resolution of fairness timer */
2331 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2332 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2333 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 2334
8a1c38d1
EG
2335 /* this is the threshold below which we won't arm the timer anymore */
2336 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 2337
8a1c38d1
EG
2338 /* we multiply by 1e3/8 to get bytes/msec.
2339 We don't want the credits to exceed a credit
2340 of t_fair*FAIR_MEM (the algorithm resolution) */
2341 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2342 /* since each tick is 4 usec */
2343 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
2344}
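/* Worked example, assuming a 10000 Mbps link: r_param = 10000/8 = 1250
 * bytes/usec, so rs_threshold = RS_PERIODIC_TIMEOUT_USEC * 1250 * 5/4
 * and t_fair = T_FAIR_COEF / 10000 - the 10G case of the "for 10G it
 * is 1000usec" comment above.
 */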
2345
2691d51d
EG
2346/* Calculates the sum of vn_min_rates.
2347 It's needed for further normalizing of the min_rates.
2348 Returns:
2349 sum of vn_min_rates.
2350 or
2351 0 - if all the min_rates are 0.
2352 In the latter case the fairness algorithm should be deactivated.
2353 If not all min_rates are zero then those that are zeroes will be set to 1.
2354 */
2355static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2356{
2357 int all_zero = 1;
2358 int port = BP_PORT(bp);
2359 int vn;
2360
2361 bp->vn_weight_sum = 0;
2362 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2363 int func = 2*vn + port;
2364 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2365 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2366 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2367
2368 /* Skip hidden vns */
2369 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2370 continue;
2371
2372 /* If min rate is zero - set it to 1 */
2373 if (!vn_min_rate)
2374 vn_min_rate = DEF_MIN_RATE;
2375 else
2376 all_zero = 0;
2377
2378 bp->vn_weight_sum += vn_min_rate;
2379 }
2380
2381 /* ... only if all min rates are zeros - disable fairness */
b015e3d1
EG
2382 if (all_zero) {
2383 bp->cmng.flags.cmng_enables &=
2384 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2385 DP(NETIF_MSG_IFUP, "All MIN values are zeroes;"
2386 " fairness will be disabled\n");
2387 } else
2388 bp->cmng.flags.cmng_enables |=
2389 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2691d51d
EG
2390}
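/* Hypothetical example: two visible vns with min-BW fields 25 and 0
 * yield vn_min_rate values of 2500 and DEF_MIN_RATE, all_zero stays 0,
 * vn_weight_sum = 2500 + DEF_MIN_RATE and fairness remains enabled.
 */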
2391
8a1c38d1 2392static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
34f80b04
EG
2393{
2394 struct rate_shaping_vars_per_vn m_rs_vn;
2395 struct fairness_vars_per_vn m_fair_vn;
2396 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2397 u16 vn_min_rate, vn_max_rate;
2398 int i;
2399
2400 /* If function is hidden - set min and max to zeroes */
2401 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2402 vn_min_rate = 0;
2403 vn_max_rate = 0;
2404
2405 } else {
2406 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2407 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
b015e3d1
EG
2408 /* If min rate is zero - set it to 1 */
2409 if (!vn_min_rate)
34f80b04
EG
2410 vn_min_rate = DEF_MIN_RATE;
2411 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2412 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2413 }
8a1c38d1 2414 DP(NETIF_MSG_IFUP,
b015e3d1 2415 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
8a1c38d1 2416 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
2417
2418 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2419 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2420
2421 /* global vn counter - maximal Mbps for this vn */
2422 m_rs_vn.vn_counter.rate = vn_max_rate;
2423
2424 /* quota - number of bytes transmitted in this period */
2425 m_rs_vn.vn_counter.quota =
2426 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2427
8a1c38d1 2428 if (bp->vn_weight_sum) {
34f80b04
EG
2429 /* credit for each period of the fairness algorithm:
2430 number of bytes in T_FAIR (the vns share the port rate).
8a1c38d1
EG
2431 vn_weight_sum should not be larger than 10000, thus
2432 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2433 than zero */
34f80b04 2434 m_fair_vn.vn_credit_delta =
8a1c38d1
EG
2435 max((u32)(vn_min_rate * (T_FAIR_COEF /
2436 (8 * bp->vn_weight_sum))),
2437 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
34f80b04
EG
2438 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2439 m_fair_vn.vn_credit_delta);
2440 }
2441
34f80b04
EG
2442 /* Store it to internal memory */
2443 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2444 REG_WR(bp, BAR_XSTRORM_INTMEM +
2445 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2446 ((u32 *)(&m_rs_vn))[i]);
2447
2448 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2449 REG_WR(bp, BAR_XSTRORM_INTMEM +
2450 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2451 ((u32 *)(&m_fair_vn))[i]);
2452}
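/* Sketch of the credit formula above: each fairness period grants the vn
 * vn_min_rate * T_FAIR_COEF / (8 * vn_weight_sum) credit bytes (its
 * proportional share of the port), clamped from below to twice
 * fair_threshold.
 */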
2453
8a1c38d1 2454
c18487ee
YR
2455/* This function is called upon link interrupt */
2456static void bnx2x_link_attn(struct bnx2x *bp)
2457{
bb2a0f7a
YG
2458 /* Make sure that we are synced with the current statistics */
2459 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2460
c18487ee 2461 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2462
bb2a0f7a
YG
2463 if (bp->link_vars.link_up) {
2464
1c06328c 2465 /* dropless flow control */
a18f5128 2466 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
1c06328c
EG
2467 int port = BP_PORT(bp);
2468 u32 pause_enabled = 0;
2469
2470 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2471 pause_enabled = 1;
2472
2473 REG_WR(bp, BAR_USTRORM_INTMEM +
ca00392c 2474 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1c06328c
EG
2475 pause_enabled);
2476 }
2477
bb2a0f7a
YG
2478 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2479 struct host_port_stats *pstats;
2480
2481 pstats = bnx2x_sp(bp, port_stats);
2482 /* reset old bmac stats */
2483 memset(&(pstats->mac_stx[0]), 0,
2484 sizeof(struct mac_stx));
2485 }
f34d28ea 2486 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a
YG
2487 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2488 }
2489
c18487ee
YR
2490 /* indicate link status */
2491 bnx2x_link_report(bp);
34f80b04
EG
2492
2493 if (IS_E1HMF(bp)) {
8a1c38d1 2494 int port = BP_PORT(bp);
34f80b04 2495 int func;
8a1c38d1 2496 int vn;
34f80b04 2497
ab6ad5a4 2498 /* Set the attention towards other drivers on the same port */
34f80b04
EG
2499 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2500 if (vn == BP_E1HVN(bp))
2501 continue;
2502
8a1c38d1 2503 func = ((vn << 1) | port);
34f80b04
EG
2504 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2505 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2506 }
34f80b04 2507
8a1c38d1
EG
2508 if (bp->link_vars.link_up) {
2509 int i;
2510
2511 /* Init rate shaping and fairness contexts */
2512 bnx2x_init_port_minmax(bp);
34f80b04 2513
34f80b04 2514 for (vn = VN_0; vn < E1HVN_MAX; vn++)
8a1c38d1
EG
2515 bnx2x_init_vn_minmax(bp, 2*vn + port);
2516
2517 /* Store it to internal memory */
2518 for (i = 0;
2519 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2520 REG_WR(bp, BAR_XSTRORM_INTMEM +
2521 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2522 ((u32 *)(&bp->cmng))[i]);
2523 }
34f80b04 2524 }
c18487ee 2525}
a2fbb9ea 2526
c18487ee
YR
2527static void bnx2x__link_status_update(struct bnx2x *bp)
2528{
f34d28ea 2529 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
c18487ee 2530 return;
a2fbb9ea 2531
c18487ee 2532 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2533
bb2a0f7a
YG
2534 if (bp->link_vars.link_up)
2535 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2536 else
2537 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2538
2691d51d
EG
2539 bnx2x_calc_vn_weight_sum(bp);
2540
c18487ee
YR
2541 /* indicate link status */
2542 bnx2x_link_report(bp);
a2fbb9ea 2543}
a2fbb9ea 2544
34f80b04
EG
2545static void bnx2x_pmf_update(struct bnx2x *bp)
2546{
2547 int port = BP_PORT(bp);
2548 u32 val;
2549
2550 bp->port.pmf = 1;
2551 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2552
2553 /* enable nig attention */
2554 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2555 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2556 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2557
2558 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2559}
2560
c18487ee 2561/* end of Link */
a2fbb9ea
ET
2562
2563/* slow path */
2564
2565/*
2566 * General service functions
2567 */
2568
2691d51d
EG
2569/* send the MCP a request, block until there is a reply */
2570u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2571{
2572 int func = BP_FUNC(bp);
2573 u32 seq = ++bp->fw_seq;
2574 u32 rc = 0;
2575 u32 cnt = 1;
2576 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2577
c4ff7cbf 2578 mutex_lock(&bp->fw_mb_mutex);
2691d51d
EG
2579 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2580 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2581
2582 do {
2583 /* let the FW do its magic ... */
2584 msleep(delay);
2585
2586 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2587
c4ff7cbf
EG
2588 /* Give the FW up to 5 seconds (500*10ms) */
2589 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2691d51d
EG
2590
2591 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2592 cnt*delay, rc, seq);
2593
2594 /* is this a reply to our command? */
2595 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2596 rc &= FW_MSG_CODE_MASK;
2597 else {
2598 /* FW BUG! */
2599 BNX2X_ERR("FW failed to respond!\n");
2600 bnx2x_fw_dump(bp);
2601 rc = 0;
2602 }
c4ff7cbf 2603 mutex_unlock(&bp->fw_mb_mutex);
2691d51d
EG
2604
2605 return rc;
2606}
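/* Example call (as in the DCC handler below): report success to the MCP
 * and wait for the matching sequence number in fw_mb_header:
 *
 * rc = bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
 */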
2607
2608static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
e665bfda 2609static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2691d51d
EG
2610static void bnx2x_set_rx_mode(struct net_device *dev);
2611
2612static void bnx2x_e1h_disable(struct bnx2x *bp)
2613{
2614 int port = BP_PORT(bp);
2691d51d
EG
2615
2616 netif_tx_disable(bp->dev);
2691d51d
EG
2617
2618 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2619
2691d51d
EG
2620 netif_carrier_off(bp->dev);
2621}
2622
2623static void bnx2x_e1h_enable(struct bnx2x *bp)
2624{
2625 int port = BP_PORT(bp);
2626
2627 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2628
2691d51d
EG
2629 /* Only the Tx queues should be re-enabled here */
2630 netif_tx_wake_all_queues(bp->dev);
2631
061bc702
EG
2632 /*
2633 * Should not call netif_carrier_on since it will be called if the link
2634 * is up when checking for link state
2635 */
2691d51d
EG
2636}
2637
2638static void bnx2x_update_min_max(struct bnx2x *bp)
2639{
2640 int port = BP_PORT(bp);
2641 int vn, i;
2642
2643 /* Init rate shaping and fairness contexts */
2644 bnx2x_init_port_minmax(bp);
2645
2646 bnx2x_calc_vn_weight_sum(bp);
2647
2648 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2649 bnx2x_init_vn_minmax(bp, 2*vn + port);
2650
2651 if (bp->port.pmf) {
2652 int func;
2653
2654 /* Set the attention towards other drivers on the same port */
2655 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2656 if (vn == BP_E1HVN(bp))
2657 continue;
2658
2659 func = ((vn << 1) | port);
2660 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2661 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2662 }
2663
2664 /* Store it to internal memory */
2665 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2666 REG_WR(bp, BAR_XSTRORM_INTMEM +
2667 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2668 ((u32 *)(&bp->cmng))[i]);
2669 }
2670}
2671
2672static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2673{
2691d51d 2674 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2691d51d
EG
2675
2676 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2677
f34d28ea
EG
2678 /*
2679 * This is the only place besides the function initialization
2680 * where the bp->flags can change so it is done without any
2681 * locks
2682 */
2691d51d
EG
2683 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2684 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
f34d28ea 2685 bp->flags |= MF_FUNC_DIS;
2691d51d
EG
2686
2687 bnx2x_e1h_disable(bp);
2688 } else {
2689 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
f34d28ea 2690 bp->flags &= ~MF_FUNC_DIS;
2691d51d
EG
2691
2692 bnx2x_e1h_enable(bp);
2693 }
2694 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2695 }
2696 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2697
2698 bnx2x_update_min_max(bp);
2699 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2700 }
2701
2702 /* Report results to MCP */
2703 if (dcc_event)
2704 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2705 else
2706 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2707}
2708
28912902
MC
2709/* must be called under the spq lock */
2710static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2711{
2712 struct eth_spe *next_spe = bp->spq_prod_bd;
2713
2714 if (bp->spq_prod_bd == bp->spq_last_bd) {
2715 bp->spq_prod_bd = bp->spq;
2716 bp->spq_prod_idx = 0;
2717 DP(NETIF_MSG_TIMER, "end of spq\n");
2718 } else {
2719 bp->spq_prod_bd++;
2720 bp->spq_prod_idx++;
2721 }
2722 return next_spe;
2723}
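/* The SPQ is a flat array of BDs: when the producer reaches
 * spq_last_bd it wraps back to bp->spq and the producer index
 * restarts at zero, as handled above.
 */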
2724
2725/* must be called under the spq lock */
2726static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2727{
2728 int func = BP_FUNC(bp);
2729
2730 /* Make sure that BD data is updated before writing the producer */
2731 wmb();
2732
2733 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2734 bp->spq_prod_idx);
2735 mmiowb();
2736}
2737
a2fbb9ea
ET
2738/* the slow path queue is odd since completions arrive on the fastpath ring */
2739static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2740 u32 data_hi, u32 data_lo, int common)
2741{
28912902 2742 struct eth_spe *spe;
a2fbb9ea 2743
34f80b04
EG
2744 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2745 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
2746 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2747 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2748 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2749
2750#ifdef BNX2X_STOP_ON_ERROR
2751 if (unlikely(bp->panic))
2752 return -EIO;
2753#endif
2754
34f80b04 2755 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2756
2757 if (!bp->spq_left) {
2758 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2759 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2760 bnx2x_panic();
2761 return -EBUSY;
2762 }
f1410647 2763
28912902
MC
2764 spe = bnx2x_sp_get_next(bp);
2765
a2fbb9ea 2766 /* CID needs port number to be encoded int it */
28912902 2767 spe->hdr.conn_and_cmd_data =
a2fbb9ea
ET
2768 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2769 HW_CID(bp, cid)));
28912902 2770 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
a2fbb9ea 2771 if (common)
28912902 2772 spe->hdr.type |=
a2fbb9ea
ET
2773 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2774
28912902
MC
2775 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2776 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
a2fbb9ea
ET
2777
2778 bp->spq_left--;
2779
28912902 2780 bnx2x_sp_prod_update(bp);
34f80b04 2781 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2782 return 0;
2783}
2784
2785/* acquire split MCP access lock register */
4a37fb66 2786static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2787{
72fd0718 2788 u32 j, val;
34f80b04 2789 int rc = 0;
a2fbb9ea
ET
2790
2791 might_sleep();
72fd0718 2792 for (j = 0; j < 1000; j++) {
a2fbb9ea
ET
2793 val = (1UL << 31);
2794 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2795 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2796 if (val & (1L << 31))
2797 break;
2798
2799 msleep(5);
2800 }
a2fbb9ea 2801 if (!(val & (1L << 31))) {
19680c48 2802 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2803 rc = -EBUSY;
2804 }
2805
2806 return rc;
2807}
2808
4a37fb66
YG
2809/* release split MCP access lock register */
2810static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea 2811{
72fd0718 2812 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
a2fbb9ea
ET
2813}
2814
2815static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2816{
2817 struct host_def_status_block *def_sb = bp->def_status_blk;
2818 u16 rc = 0;
2819
2820 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2821 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2822 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2823 rc |= 1;
2824 }
2825 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2826 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2827 rc |= 2;
2828 }
2829 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2830 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2831 rc |= 4;
2832 }
2833 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2834 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2835 rc |= 8;
2836 }
2837 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2838 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2839 rc |= 16;
2840 }
2841 return rc;
2842}
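/* The returned bitmask encodes which indices moved: 1 - attention bits,
 * 2 - CSTORM, 4 - USTORM, 8 - XSTORM, 16 - TSTORM default status block
 * index (matching the rc |= values above).
 */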
2843
2844/*
2845 * slow path service functions
2846 */
2847
2848static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2849{
34f80b04 2850 int port = BP_PORT(bp);
5c862848
EG
2851 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2852 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
2853 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2854 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2855 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2856 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2857 u32 aeu_mask;
87942b46 2858 u32 nig_mask = 0;
a2fbb9ea 2859
a2fbb9ea
ET
2860 if (bp->attn_state & asserted)
2861 BNX2X_ERR("IGU ERROR\n");
2862
3fcaf2e5
EG
2863 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2864 aeu_mask = REG_RD(bp, aeu_addr);
2865
a2fbb9ea 2866 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5 2867 aeu_mask, asserted);
72fd0718 2868 aeu_mask &= ~(asserted & 0x3ff);
3fcaf2e5 2869 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2870
3fcaf2e5
EG
2871 REG_WR(bp, aeu_addr, aeu_mask);
2872 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2873
3fcaf2e5 2874 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2875 bp->attn_state |= asserted;
3fcaf2e5 2876 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2877
2878 if (asserted & ATTN_HARD_WIRED_MASK) {
2879 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2880
a5e9a7cf
EG
2881 bnx2x_acquire_phy_lock(bp);
2882
877e9aa4 2883 /* save nig interrupt mask */
87942b46 2884 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2885 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2886
c18487ee 2887 bnx2x_link_attn(bp);
a2fbb9ea
ET
2888
2889 /* handle unicore attn? */
2890 }
2891 if (asserted & ATTN_SW_TIMER_4_FUNC)
2892 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2893
2894 if (asserted & GPIO_2_FUNC)
2895 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2896
2897 if (asserted & GPIO_3_FUNC)
2898 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2899
2900 if (asserted & GPIO_4_FUNC)
2901 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2902
2903 if (port == 0) {
2904 if (asserted & ATTN_GENERAL_ATTN_1) {
2905 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2906 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2907 }
2908 if (asserted & ATTN_GENERAL_ATTN_2) {
2909 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2910 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2911 }
2912 if (asserted & ATTN_GENERAL_ATTN_3) {
2913 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2914 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2915 }
2916 } else {
2917 if (asserted & ATTN_GENERAL_ATTN_4) {
2918 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2919 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2920 }
2921 if (asserted & ATTN_GENERAL_ATTN_5) {
2922 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2923 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2924 }
2925 if (asserted & ATTN_GENERAL_ATTN_6) {
2926 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2927 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2928 }
2929 }
2930
2931 } /* if hardwired */
2932
5c862848
EG
2933 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2934 asserted, hc_addr);
2935 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
2936
2937 /* now set back the mask */
a5e9a7cf 2938 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2939 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2940 bnx2x_release_phy_lock(bp);
2941 }
a2fbb9ea
ET
2942}
2943
fd4ef40d
EG
2944static inline void bnx2x_fan_failure(struct bnx2x *bp)
2945{
2946 int port = BP_PORT(bp);
2947
2948 /* mark the failure */
2949 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2950 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2951 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2952 bp->link_params.ext_phy_config);
2953
2954 /* log the failure */
7995c64e
JP
2955 netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
2956 "Please contact Dell Support for assistance.\n");
fd4ef40d 2957}
ab6ad5a4 2958
877e9aa4 2959static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2960{
34f80b04 2961 int port = BP_PORT(bp);
877e9aa4 2962 int reg_offset;
4d295db0 2963 u32 val, swap_val, swap_override;
877e9aa4 2964
34f80b04
EG
2965 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2966 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2967
34f80b04 2968 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2969
2970 val = REG_RD(bp, reg_offset);
2971 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2972 REG_WR(bp, reg_offset, val);
2973
2974 BNX2X_ERR("SPIO5 hw attention\n");
2975
fd4ef40d 2976 /* Fan failure attention */
35b19ba5
EG
2977 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2978 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
17de50b7 2979 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2980 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2981 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
fd4ef40d
EG
2982 /* The PHY reset is controlled by GPIO 1 */
2983 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2984 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4
ET
2985 break;
2986
4d295db0
EG
2987 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2988 /* The PHY reset is controlled by GPIO 1 */
2989 /* fake the port number to cancel the swap done in
2990 set_gpio() */
2991 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2992 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2993 port = (swap_val && swap_override) ^ 1;
2994 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2995 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2996 break;
2997
877e9aa4
ET
2998 default:
2999 break;
3000 }
fd4ef40d 3001 bnx2x_fan_failure(bp);
877e9aa4 3002 }
34f80b04 3003
589abe3a
EG
3004 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
3005 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
3006 bnx2x_acquire_phy_lock(bp);
3007 bnx2x_handle_module_detect_int(&bp->link_params);
3008 bnx2x_release_phy_lock(bp);
3009 }
3010
34f80b04
EG
3011 if (attn & HW_INTERRUT_ASSERT_SET_0) {
3012
3013 val = REG_RD(bp, reg_offset);
3014 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
3015 REG_WR(bp, reg_offset, val);
3016
3017 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
0fc5d009 3018 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
34f80b04
EG
3019 bnx2x_panic();
3020 }
877e9aa4
ET
3021}
3022
3023static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3024{
3025 u32 val;
3026
0626b899 3027 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
3028
3029 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3030 BNX2X_ERR("DB hw attention 0x%x\n", val);
3031 /* DORQ discard attention */
3032 if (val & 0x2)
3033 BNX2X_ERR("FATAL error from DORQ\n");
3034 }
34f80b04
EG
3035
3036 if (attn & HW_INTERRUT_ASSERT_SET_1) {
3037
3038 int port = BP_PORT(bp);
3039 int reg_offset;
3040
3041 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3042 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3043
3044 val = REG_RD(bp, reg_offset);
3045 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3046 REG_WR(bp, reg_offset, val);
3047
3048 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
0fc5d009 3049 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
34f80b04
EG
3050 bnx2x_panic();
3051 }
877e9aa4
ET
3052}
3053
3054static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3055{
3056 u32 val;
3057
3058 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3059
3060 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3061 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3062 /* CFC error attention */
3063 if (val & 0x2)
3064 BNX2X_ERR("FATAL error from CFC\n");
3065 }
3066
3067 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3068
3069 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3070 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3071 /* RQ_USDMDP_FIFO_OVERFLOW */
3072 if (val & 0x18000)
3073 BNX2X_ERR("FATAL error from PXP\n");
3074 }
34f80b04
EG
3075
3076 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3077
3078 int port = BP_PORT(bp);
3079 int reg_offset;
3080
3081 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3082 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3083
3084 val = REG_RD(bp, reg_offset);
3085 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3086 REG_WR(bp, reg_offset, val);
3087
3088 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
0fc5d009 3089 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
34f80b04
EG
3090 bnx2x_panic();
3091 }
877e9aa4
ET
3092}
3093
3094static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3095{
34f80b04
EG
3096 u32 val;
3097
877e9aa4
ET
3098 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3099
34f80b04
EG
3100 if (attn & BNX2X_PMF_LINK_ASSERT) {
3101 int func = BP_FUNC(bp);
3102
3103 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
b015e3d1
EG
3104 bp->mf_config = SHMEM_RD(bp,
3105 mf_cfg.func_mf_config[func].config);
2691d51d
EG
3106 val = SHMEM_RD(bp, func_mb[func].drv_status);
3107 if (val & DRV_STATUS_DCC_EVENT_MASK)
3108 bnx2x_dcc_event(bp,
3109 (val & DRV_STATUS_DCC_EVENT_MASK));
34f80b04 3110 bnx2x__link_status_update(bp);
2691d51d 3111 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
34f80b04
EG
3112 bnx2x_pmf_update(bp);
3113
3114 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
3115
3116 BNX2X_ERR("MC assert!\n");
3117 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3118 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3119 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3120 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3121 bnx2x_panic();
3122
3123 } else if (attn & BNX2X_MCP_ASSERT) {
3124
3125 BNX2X_ERR("MCP assert!\n");
3126 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 3127 bnx2x_fw_dump(bp);
877e9aa4
ET
3128
3129 } else
3130 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3131 }
3132
3133 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
3134 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3135 if (attn & BNX2X_GRC_TIMEOUT) {
3136 val = CHIP_IS_E1H(bp) ?
3137 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3138 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3139 }
3140 if (attn & BNX2X_GRC_RSV) {
3141 val = CHIP_IS_E1H(bp) ?
3142 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3143 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3144 }
877e9aa4 3145 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
3146 }
3147}
3148
72fd0718
VZ
3149static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
3150static int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
3151
3152
3153#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3154#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3155#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3156#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3157#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
3158#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
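/* Layout of BNX2X_MISC_GEN_REG implied by the masks above: bits [15:0]
 * count the loaded functions and bit 16 marks a reset in progress
 * (set by bnx2x_set_reset_in_progress(), cleared by
 * bnx2x_set_reset_done(), tested by bnx2x_reset_is_done()).
 */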
3159/*
3160 * should be run under rtnl lock
3161 */
3162static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3163{
3164 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3165 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3166 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3167 barrier();
3168 mmiowb();
3169}
3170
3171/*
3172 * should be run under rtnl lock
3173 */
3174static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3175{
3176 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3177 val |= (1 << 16);
3178 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3179 barrier();
3180 mmiowb();
3181}
3182
3183/*
3184 * should be run under rtnl lock
3185 */
3186static inline bool bnx2x_reset_is_done(struct bnx2x *bp)
3187{
3188 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3189 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3190 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3191}
3192
3193/*
3194 * should be run under rtnl lock
3195 */
3196static inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3197{
3198 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3199
3200 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3201
3202 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3203 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3204 barrier();
3205 mmiowb();
3206}
3207
3208/*
3209 * should be run under rtnl lock
3210 */
3211static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3212{
3213 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3214
3215 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3216
3217 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3218 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3219 barrier();
3220 mmiowb();
3221
3222 return val1;
3223}
3224
3225/*
3226 * should be run under rtnl lock
3227 */
3228static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3229{
3230 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3231}
3232
3233static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3234{
3235 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3236 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3237}
3238
3239static inline void _print_next_block(int idx, const char *blk)
3240{
3241 if (idx)
3242 pr_cont(", ");
3243 pr_cont("%s", blk);
3244}
3245
3246static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3247{
3248 int i = 0;
3249 u32 cur_bit = 0;
3250 for (i = 0; sig; i++) {
3251 cur_bit = ((u32)0x1 << i);
3252 if (sig & cur_bit) {
3253 switch (cur_bit) {
3254 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3255 _print_next_block(par_num++, "BRB");
3256 break;
3257 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3258 _print_next_block(par_num++, "PARSER");
3259 break;
3260 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3261 _print_next_block(par_num++, "TSDM");
3262 break;
3263 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3264 _print_next_block(par_num++, "SEARCHER");
3265 break;
3266 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3267 _print_next_block(par_num++, "TSEMI");
3268 break;
3269 }
3270
3271 /* Clear the bit */
3272 sig &= ~cur_bit;
3273 }
3274 }
3275
3276 return par_num;
3277}
3278
3279static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3280{
3281 int i = 0;
3282 u32 cur_bit = 0;
3283 for (i = 0; sig; i++) {
3284 cur_bit = ((u32)0x1 << i);
3285 if (sig & cur_bit) {
3286 switch (cur_bit) {
3287 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3288 _print_next_block(par_num++, "PBCLIENT");
3289 break;
3290 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3291 _print_next_block(par_num++, "QM");
3292 break;
3293 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3294 _print_next_block(par_num++, "XSDM");
3295 break;
3296 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3297 _print_next_block(par_num++, "XSEMI");
3298 break;
3299 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3300 _print_next_block(par_num++, "DOORBELLQ");
3301 break;
3302 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3303 _print_next_block(par_num++, "VAUX PCI CORE");
3304 break;
3305 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3306 _print_next_block(par_num++, "DEBUG");
3307 break;
3308 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3309 _print_next_block(par_num++, "USDM");
3310 break;
3311 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3312 _print_next_block(par_num++, "USEMI");
3313 break;
3314 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3315 _print_next_block(par_num++, "UPB");
3316 break;
3317 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3318 _print_next_block(par_num++, "CSDM");
3319 break;
3320 }
3321
3322 /* Clear the bit */
3323 sig &= ~cur_bit;
3324 }
3325 }
3326
3327 return par_num;
3328}
3329
3330static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3331{
3332 int i = 0;
3333 u32 cur_bit = 0;
3334 for (i = 0; sig; i++) {
3335 cur_bit = ((u32)0x1 << i);
3336 if (sig & cur_bit) {
3337 switch (cur_bit) {
3338 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3339 _print_next_block(par_num++, "CSEMI");
3340 break;
3341 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3342 _print_next_block(par_num++, "PXP");
3343 break;
3344 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3345 _print_next_block(par_num++,
3346 "PXPPCICLOCKCLIENT");
3347 break;
3348 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3349 _print_next_block(par_num++, "CFC");
3350 break;
3351 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3352 _print_next_block(par_num++, "CDU");
3353 break;
3354 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3355 _print_next_block(par_num++, "IGU");
3356 break;
3357 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3358 _print_next_block(par_num++, "MISC");
3359 break;
3360 }
3361
3362 /* Clear the bit */
3363 sig &= ~cur_bit;
3364 }
3365 }
3366
3367 return par_num;
3368}
3369
3370static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3371{
3372 int i = 0;
3373 u32 cur_bit = 0;
3374 for (i = 0; sig; i++) {
3375 cur_bit = ((u32)0x1 << i);
3376 if (sig & cur_bit) {
3377 switch (cur_bit) {
3378 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3379 _print_next_block(par_num++, "MCP ROM");
3380 break;
3381 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3382 _print_next_block(par_num++, "MCP UMP RX");
3383 break;
3384 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3385 _print_next_block(par_num++, "MCP UMP TX");
3386 break;
3387 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3388 _print_next_block(par_num++, "MCP SCPAD");
3389 break;
3390 }
3391
3392 /* Clear the bit */
3393 sig &= ~cur_bit;
3394 }
3395 }
3396
3397 return par_num;
3398}
3399
3400static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3401 u32 sig2, u32 sig3)
3402{
3403 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3404 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3405 int par_num = 0;
3406 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3407 "[0]:0x%08x [1]:0x%08x "
3408 "[2]:0x%08x [3]:0x%08x\n",
3409 sig0 & HW_PRTY_ASSERT_SET_0,
3410 sig1 & HW_PRTY_ASSERT_SET_1,
3411 sig2 & HW_PRTY_ASSERT_SET_2,
3412 sig3 & HW_PRTY_ASSERT_SET_3);
3413 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3414 bp->dev->name);
3415 par_num = bnx2x_print_blocks_with_parity0(
3416 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3417 par_num = bnx2x_print_blocks_with_parity1(
3418 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3419 par_num = bnx2x_print_blocks_with_parity2(
3420 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3421 par_num = bnx2x_print_blocks_with_parity3(
3422 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3423 printk("\n");
3424 return true;
3425 } else
3426 return false;
3427}
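/* Example console output of the helpers above (hypothetical signals):
 * "eth0: Parity errors detected in blocks: BRB, QM, MCP ROM"
 */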
3428
3429static bool bnx2x_chk_parity_attn(struct bnx2x *bp)
877e9aa4 3430{
a2fbb9ea 3431 struct attn_route attn;
72fd0718
VZ
3432 int port = BP_PORT(bp);
3433
3434 attn.sig[0] = REG_RD(bp,
3435 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3436 port*4);
3437 attn.sig[1] = REG_RD(bp,
3438 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3439 port*4);
3440 attn.sig[2] = REG_RD(bp,
3441 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3442 port*4);
3443 attn.sig[3] = REG_RD(bp,
3444 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3445 port*4);
3446
3447 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3448 attn.sig[3]);
3449}
3450
3451static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3452{
3453 struct attn_route attn, *group_mask;
34f80b04 3454 int port = BP_PORT(bp);
877e9aa4 3455 int index;
a2fbb9ea
ET
3456 u32 reg_addr;
3457 u32 val;
3fcaf2e5 3458 u32 aeu_mask;
a2fbb9ea
ET
3459
3460 /* need to take HW lock because MCP or other port might also
3461 try to handle this event */
4a37fb66 3462 bnx2x_acquire_alr(bp);
a2fbb9ea 3463
72fd0718
VZ
3464 if (bnx2x_chk_parity_attn(bp)) {
3465 bp->recovery_state = BNX2X_RECOVERY_INIT;
3466 bnx2x_set_reset_in_progress(bp);
3467 schedule_delayed_work(&bp->reset_task, 0);
3468 /* Disable HW interrupts */
3469 bnx2x_int_disable(bp);
3470 bnx2x_release_alr(bp);
3471 /* In case of parity errors don't handle attentions so that
3472 * the other function can also "see" the parity errors.
3473 */
3474 return;
3475 }
3476
3477 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3478 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3479 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3480 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3481 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3482 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3483
3484 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3485 if (deasserted & (1 << index)) {
72fd0718 3486 group_mask = &bp->attn_group[index];
a2fbb9ea 3487
34f80b04 3488 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3489 index, group_mask->sig[0], group_mask->sig[1],
3490 group_mask->sig[2], group_mask->sig[3]);
a2fbb9ea 3491
877e9aa4 3492 bnx2x_attn_int_deasserted3(bp,
72fd0718 3493 attn.sig[3] & group_mask->sig[3]);
877e9aa4 3494 bnx2x_attn_int_deasserted1(bp,
72fd0718 3495 attn.sig[1] & group_mask->sig[1]);
877e9aa4 3496 bnx2x_attn_int_deasserted2(bp,
72fd0718 3497 attn.sig[2] & group_mask->sig[2]);
877e9aa4 3498 bnx2x_attn_int_deasserted0(bp,
72fd0718 3499 attn.sig[0] & group_mask->sig[0]);
3500 }
3501 }
3502
4a37fb66 3503 bnx2x_release_alr(bp);
a2fbb9ea 3504
5c862848 3505 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3506
3507 val = ~deasserted;
3508 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3509 val, reg_addr);
5c862848 3510 REG_WR(bp, reg_addr, val);
a2fbb9ea 3511
a2fbb9ea 3512 if (~bp->attn_state & deasserted)
3fcaf2e5 3513 BNX2X_ERR("IGU ERROR\n");
3514
3515 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3516 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3517
3518 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3519 aeu_mask = REG_RD(bp, reg_addr);
3520
3521 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3522 aeu_mask, deasserted);
72fd0718 3523 aeu_mask |= (deasserted & 0x3ff);
3fcaf2e5 3524 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 3525
3526 REG_WR(bp, reg_addr, aeu_mask);
3527 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3528
3529 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3530 bp->attn_state &= ~deasserted;
3531 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3532}
3533
3534static void bnx2x_attn_int(struct bnx2x *bp)
3535{
3536 /* read local copy of bits */
3537 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3538 attn_bits);
3539 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3540 attn_bits_ack);
3541 u32 attn_state = bp->attn_state;
3542
3543 /* look for changed bits */
3544 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3545 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3546
3547 DP(NETIF_MSG_HW,
3548 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3549 attn_bits, attn_ack, asserted, deasserted);
3550
3551 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 3552 BNX2X_ERR("BAD attention state\n");
3553
3554 /* handle bits that were raised */
3555 if (asserted)
3556 bnx2x_attn_int_asserted(bp, asserted);
3557
3558 if (deasserted)
3559 bnx2x_attn_int_deasserted(bp, deasserted);
3560}
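
/* Example for the asserted/deasserted masks in bnx2x_attn_int(), with
 * illustrative values (not taken from hardware): attn_bits = 0x6,
 * attn_ack = 0x4, attn_state = 0x4 gives
 *	asserted   =  0x6 & ~0x4 & ~0x4 = 0x2	(raised, not yet acked)
 *	deasserted = ~0x6 &  0x4 &  0x4 = 0x0
 * so a bit is reported as deasserted only once it has dropped from
 * attn_bits while still being acked and still tracked in attn_state.
 */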
3561
3562static void bnx2x_sp_task(struct work_struct *work)
3563{
1cf167f2 3564 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3565 u16 status;
3566
34f80b04 3567
3568 /* Return here if interrupt is disabled */
3569 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3570 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3571 return;
3572 }
3573
3574 status = bnx2x_update_dsb_idx(bp);
3575/* if (status == 0) */
3576/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 3577
3196a88a 3578 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 3579
3580 /* HW attentions */
3581 if (status & 0x1)
a2fbb9ea 3582 bnx2x_attn_int(bp);
a2fbb9ea 3583
68d59484 3584 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3585 IGU_INT_NOP, 1);
3586 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3587 IGU_INT_NOP, 1);
3588 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3589 IGU_INT_NOP, 1);
3590 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3591 IGU_INT_NOP, 1);
3592 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3593 IGU_INT_ENABLE, 1);
877e9aa4 3594
3595}
3596
3597static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3598{
3599 struct net_device *dev = dev_instance;
3600 struct bnx2x *bp = netdev_priv(dev);
3601
3602 /* Return here if interrupt is disabled */
3603 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3604 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3605 return IRQ_HANDLED;
3606 }
3607
8d9c5f34 3608 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3609
3610#ifdef BNX2X_STOP_ON_ERROR
3611 if (unlikely(bp->panic))
3612 return IRQ_HANDLED;
3613#endif
3614
3615#ifdef BCM_CNIC
3616 {
3617 struct cnic_ops *c_ops;
3618
3619 rcu_read_lock();
3620 c_ops = rcu_dereference(bp->cnic_ops);
3621 if (c_ops)
3622 c_ops->cnic_handler(bp->cnic_data, NULL);
3623 rcu_read_unlock();
3624 }
3625#endif
1cf167f2 3626 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3627
3628 return IRQ_HANDLED;
3629}
3630
3631/* end of slow path */
3632
3633/* Statistics */
3634
3635/****************************************************************************
3636* Macros
3637****************************************************************************/
3638
3639/* sum[hi:lo] += add[hi:lo] */
3640#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3641 do { \
3642 s_lo += a_lo; \
f5ba6772 3643 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3644 } while (0)
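
/* Example (illustrative values): the (s_lo < a_lo) test detects a 32-bit
 * wrap, since unsigned overflow is well defined in C. With
 * s_lo = 0xffffffff and a_lo = 0x1, s_lo wraps to 0x0, the comparison
 * 0x0 < 0x1 holds, and 1 is carried into s_hi, matching a true 64-bit
 * addition.
 */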
3645
3646/* difference = minuend - subtrahend */
3647#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3648 do { \
3649 if (m_lo < s_lo) { \
3650 /* underflow */ \
a2fbb9ea 3651 d_hi = m_hi - s_hi; \
bb2a0f7a 3652 if (d_hi > 0) { \
6378c025 3653 /* we can 'loan' 1 */ \
3654 d_hi--; \
3655 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 3656 } else { \
6378c025 3657 /* m_hi <= s_hi */ \
3658 d_hi = 0; \
3659 d_lo = 0; \
3660 } \
3661 } else { \
3662 /* m_lo >= s_lo */ \
a2fbb9ea 3663 if (m_hi < s_hi) { \
3664 d_hi = 0; \
3665 d_lo = 0; \
3666 } else { \
6378c025 3667 /* m_hi >= s_hi */ \
3668 d_hi = m_hi - s_hi; \
3669 d_lo = m_lo - s_lo; \
3670 } \
3671 } \
3672 } while (0)
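
/* Example (illustrative values): m = 0x00000002_00000001 minus
 * s = 0x00000001_00000002 takes the underflow branch (m_lo < s_lo):
 *	d_hi = 2 - 1 - 1 = 0
 *	d_lo = 1 + (0xffffffff - 2) + 1 = 0xffffffff
 * i.e. d = 0x00000000_ffffffff, the true 64-bit difference. If the
 * subtrahend exceeds the minuend, the result is clamped to 0 instead of
 * wrapping.
 */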
3673
bb2a0f7a 3674#define UPDATE_STAT64(s, t) \
a2fbb9ea 3675 do { \
3676 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3677 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3678 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3679 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3680 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3681 pstats->mac_stx[1].t##_lo, diff.lo); \
3682 } while (0)
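
/* Note: mac_stx[0] holds the previous raw MAC snapshot and mac_stx[1] the
 * running 64-bit accumulator. UPDATE_STAT64 takes the delta against the
 * old snapshot (DIFF_64), saves the new snapshot, and folds the delta
 * into the accumulator (ADD_64); thanks to the DIFF_64 clamp, a MAC
 * counter restarting from zero yields a zero delta for that interval
 * rather than a bogus huge one.
 */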
3683
bb2a0f7a 3684#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 3685 do { \
3686 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3687 diff.lo, new->s##_lo, old->s##_lo); \
3688 ADD_64(estats->t##_hi, diff.hi, \
3689 estats->t##_lo, diff.lo); \
3690 } while (0)
3691
3692/* sum[hi:lo] += add */
3693#define ADD_EXTEND_64(s_hi, s_lo, a) \
3694 do { \
3695 s_lo += a; \
3696 s_hi += (s_lo < a) ? 1 : 0; \
3697 } while (0)
3698
bb2a0f7a 3699#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 3700 do { \
3701 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3702 pstats->mac_stx[1].s##_lo, \
3703 new->s); \
3704 } while (0)
3705
bb2a0f7a 3706#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea 3707 do { \
3708 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3709 old_tclient->s = tclient->s; \
3710 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3711 } while (0)
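
/* Note: in this and the UPDATE_EXTEND_USTAT/XSTAT variants below, the
 * storm counters are only 32 bits wide and "diff" is computed in u32
 * arithmetic, so a counter that wrapped since the last poll still yields
 * the correct delta (e.g. old = 0xfffffff0, new = 0x00000010 gives
 * diff = 0x20), which ADD_EXTEND_64 folds into the 64-bit accumulator.
 */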
3712
3713#define UPDATE_EXTEND_USTAT(s, t) \
3714 do { \
3715 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3716 old_uclient->s = uclient->s; \
3717 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3718 } while (0)
3719
3720#define UPDATE_EXTEND_XSTAT(s, t) \
3721 do { \
3722 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3723 old_xclient->s = xclient->s; \
3724 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3725 } while (0)
3726
3727/* minuend -= subtrahend */
3728#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3729 do { \
3730 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3731 } while (0)
3732
3733/* minuend[hi:lo] -= subtrahend */
3734#define SUB_EXTEND_64(m_hi, m_lo, s) \
3735 do { \
3736 SUB_64(m_hi, 0, m_lo, s); \
3737 } while (0)
3738
3739#define SUB_EXTEND_USTAT(s, t) \
3740 do { \
3741 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3742 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3743 } while (0)
3744
3745/*
3746 * General service functions
3747 */
3748
3749static inline long bnx2x_hilo(u32 *hiref)
3750{
3751 u32 lo = *(hiref + 1);
3752#if (BITS_PER_LONG == 64)
3753 u32 hi = *hiref;
3754
3755 return HILO_U64(hi, lo);
3756#else
3757 return lo;
3758#endif
3759}
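
/* Note: the stats fields are laid out as {hi, lo} u32 pairs, so *hiref is
 * the high word and *(hiref + 1) the low word. On 64-bit builds the full
 * hi:lo value is returned; on 32-bit builds, where the long return type
 * can only carry 32 bits, just the low word is reported.
 */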
3760
3761/*
3762 * Init service functions
3763 */
3764
3765static void bnx2x_storm_stats_post(struct bnx2x *bp)
3766{
3767 if (!bp->stats_pending) {
3768 struct eth_query_ramrod_data ramrod_data = {0};
de832a55 3769 int i, rc;
3770
3771 ramrod_data.drv_counter = bp->stats_counter++;
8d9c5f34 3772 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3773 for_each_queue(bp, i)
3774 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3775
3776 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3777 ((u32 *)&ramrod_data)[1],
3778 ((u32 *)&ramrod_data)[0], 0);
3779 if (rc == 0) {
3780 /* stats ramrod has its own slot on the spq */
3781 bp->spq_left++;
3782 bp->stats_pending = 1;
3783 }
3784 }
3785}
3786
3787static void bnx2x_hw_stats_post(struct bnx2x *bp)
3788{
3789 struct dmae_command *dmae = &bp->stats_dmae;
3790 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3791
3792 *stats_comp = DMAE_COMP_VAL;
3793 if (CHIP_REV_IS_SLOW(bp))
3794 return;
3795
3796 /* loader */
3797 if (bp->executer_idx) {
3798 int loader_idx = PMF_DMAE_C(bp);
3799
3800 memset(dmae, 0, sizeof(struct dmae_command));
3801
3802 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3803 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3804 DMAE_CMD_DST_RESET |
3805#ifdef __BIG_ENDIAN
3806 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3807#else
3808 DMAE_CMD_ENDIANITY_DW_SWAP |
3809#endif
3810 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3811 DMAE_CMD_PORT_0) |
3812 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3813 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3814 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3815 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3816 sizeof(struct dmae_command) *
3817 (loader_idx + 1)) >> 2;
3818 dmae->dst_addr_hi = 0;
3819 dmae->len = sizeof(struct dmae_command) >> 2;
3820 if (CHIP_IS_E1(bp))
3821 dmae->len--;
3822 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3823 dmae->comp_addr_hi = 0;
3824 dmae->comp_val = 1;
3825
3826 *stats_comp = 0;
3827 bnx2x_post_dmae(bp, dmae, loader_idx);
3828
3829 } else if (bp->func_stx) {
3830 *stats_comp = 0;
3831 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3832 }
3833}
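
/* Note: when several DMAE commands have been queued (executer_idx != 0),
 * the command posted above acts as a loader: it copies the first queued
 * dmae_command from host memory into DMAE command memory at slot
 * loader_idx + 1, and its completion write lands on that slot's GO
 * register (dmae_reg_go_c[loader_idx + 1]), launching the copied command
 * in hardware instead of posting every command from the CPU.
 */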
3834
3835static int bnx2x_stats_comp(struct bnx2x *bp)
3836{
3837 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3838 int cnt = 10;
3839
3840 might_sleep();
3841 while (*stats_comp != DMAE_COMP_VAL) {
3842 if (!cnt) {
3843 BNX2X_ERR("timeout waiting for stats finished\n");
3844 break;
3845 }
3846 cnt--;
12469401 3847 msleep(1);
3848 }
3849 return 1;
3850}
3851
3852/*
3853 * Statistics service functions
3854 */
3855
3856static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3857{
3858 struct dmae_command *dmae;
3859 u32 opcode;
3860 int loader_idx = PMF_DMAE_C(bp);
3861 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3862
3863 /* sanity */
3864 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3865 BNX2X_ERR("BUG!\n");
3866 return;
3867 }
3868
3869 bp->executer_idx = 0;
3870
3871 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3872 DMAE_CMD_C_ENABLE |
3873 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3874#ifdef __BIG_ENDIAN
3875 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3876#else
3877 DMAE_CMD_ENDIANITY_DW_SWAP |
3878#endif
3879 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3880 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3881
3882 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3883 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3884 dmae->src_addr_lo = bp->port.port_stx >> 2;
3885 dmae->src_addr_hi = 0;
3886 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3887 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3888 dmae->len = DMAE_LEN32_RD_MAX;
3889 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3890 dmae->comp_addr_hi = 0;
3891 dmae->comp_val = 1;
3892
3893 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3894 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3895 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3896 dmae->src_addr_hi = 0;
3897 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3898 DMAE_LEN32_RD_MAX * 4);
3899 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3900 DMAE_LEN32_RD_MAX * 4);
3901 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3902 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3903 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3904 dmae->comp_val = DMAE_COMP_VAL;
3905
3906 *stats_comp = 0;
3907 bnx2x_hw_stats_post(bp);
3908 bnx2x_stats_comp(bp);
3909}
3910
3911static void bnx2x_port_stats_init(struct bnx2x *bp)
3912{
3913 struct dmae_command *dmae;
34f80b04 3914 int port = BP_PORT(bp);
bb2a0f7a 3915 int vn = BP_E1HVN(bp);
a2fbb9ea 3916 u32 opcode;
bb2a0f7a 3917 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3918 u32 mac_addr;
3919 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3920
3921 /* sanity */
3922 if (!bp->link_vars.link_up || !bp->port.pmf) {
3923 BNX2X_ERR("BUG!\n");
3924 return;
3925 }
3926
3927 bp->executer_idx = 0;
3928
3929 /* MCP */
3930 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3931 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3932 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3933#ifdef __BIG_ENDIAN
bb2a0f7a 3934 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3935#else
bb2a0f7a 3936 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3937#endif
3938 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3939 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3940
bb2a0f7a 3941 if (bp->port.port_stx) {
3942
3943 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3944 dmae->opcode = opcode;
3945 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3946 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3947 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3948 dmae->dst_addr_hi = 0;
3949 dmae->len = sizeof(struct host_port_stats) >> 2;
3950 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3951 dmae->comp_addr_hi = 0;
3952 dmae->comp_val = 1;
3953 }
3954
3955 if (bp->func_stx) {
3956
3957 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3958 dmae->opcode = opcode;
3959 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3960 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3961 dmae->dst_addr_lo = bp->func_stx >> 2;
3962 dmae->dst_addr_hi = 0;
3963 dmae->len = sizeof(struct host_func_stats) >> 2;
3964 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3965 dmae->comp_addr_hi = 0;
3966 dmae->comp_val = 1;
3967 }
3968
bb2a0f7a 3969 /* MAC */
3970 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3971 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3972 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3973#ifdef __BIG_ENDIAN
3974 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3975#else
3976 DMAE_CMD_ENDIANITY_DW_SWAP |
3977#endif
3978 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3979 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3980
c18487ee 3981 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3982
3983 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3984 NIG_REG_INGRESS_BMAC0_MEM);
3985
3986 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3987 BIGMAC_REGISTER_TX_STAT_GTBYT */
3988 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3989 dmae->opcode = opcode;
3990 dmae->src_addr_lo = (mac_addr +
3991 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3992 dmae->src_addr_hi = 0;
3993 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3994 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3995 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3996 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3997 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3998 dmae->comp_addr_hi = 0;
3999 dmae->comp_val = 1;
4000
4001 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
4002 BIGMAC_REGISTER_RX_STAT_GRIPJ */
4003 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4004 dmae->opcode = opcode;
4005 dmae->src_addr_lo = (mac_addr +
4006 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4007 dmae->src_addr_hi = 0;
4008 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 4009 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 4010 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 4011 offsetof(struct bmac_stats, rx_stat_gr64_lo));
4012 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
4013 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4014 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4015 dmae->comp_addr_hi = 0;
4016 dmae->comp_val = 1;
4017
c18487ee 4018 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
4019
4020 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
4021
4022 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
4023 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4024 dmae->opcode = opcode;
4025 dmae->src_addr_lo = (mac_addr +
4026 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
4027 dmae->src_addr_hi = 0;
4028 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4029 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4030 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
4031 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4032 dmae->comp_addr_hi = 0;
4033 dmae->comp_val = 1;
4034
4035 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
4036 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4037 dmae->opcode = opcode;
4038 dmae->src_addr_lo = (mac_addr +
4039 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
4040 dmae->src_addr_hi = 0;
4041 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 4042 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 4043 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 4044 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
4045 dmae->len = 1;
4046 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4047 dmae->comp_addr_hi = 0;
4048 dmae->comp_val = 1;
4049
4050 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
4051 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4052 dmae->opcode = opcode;
4053 dmae->src_addr_lo = (mac_addr +
4054 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
4055 dmae->src_addr_hi = 0;
4056 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 4057 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 4058 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 4059 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
4060 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
4061 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4062 dmae->comp_addr_hi = 0;
4063 dmae->comp_val = 1;
4064 }
4065
4066 /* NIG */
4067 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4068 dmae->opcode = opcode;
4069 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
4070 NIG_REG_STAT0_BRB_DISCARD) >> 2;
4071 dmae->src_addr_hi = 0;
4072 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
4073 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
4074 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
4075 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4076 dmae->comp_addr_hi = 0;
4077 dmae->comp_val = 1;
4078
4079 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4080 dmae->opcode = opcode;
4081 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
4082 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
4083 dmae->src_addr_hi = 0;
4084 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4085 offsetof(struct nig_stats, egress_mac_pkt0_lo));
4086 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4087 offsetof(struct nig_stats, egress_mac_pkt0_lo));
4088 dmae->len = (2*sizeof(u32)) >> 2;
4089 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4090 dmae->comp_addr_hi = 0;
4091 dmae->comp_val = 1;
4092
4093 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4094 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4095 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4096 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4097#ifdef __BIG_ENDIAN
4098 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4099#else
4100 DMAE_CMD_ENDIANITY_DW_SWAP |
4101#endif
4102 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4103 (vn << DMAE_CMD_E1HVN_SHIFT));
4104 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
4105 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
a2fbb9ea 4106 dmae->src_addr_hi = 0;
4107 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4108 offsetof(struct nig_stats, egress_mac_pkt1_lo));
4109 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4110 offsetof(struct nig_stats, egress_mac_pkt1_lo));
4111 dmae->len = (2*sizeof(u32)) >> 2;
4112 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4113 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4114 dmae->comp_val = DMAE_COMP_VAL;
4115
4116 *stats_comp = 0;
4117}
4118
bb2a0f7a 4119static void bnx2x_func_stats_init(struct bnx2x *bp)
a2fbb9ea 4120{
4121 struct dmae_command *dmae = &bp->stats_dmae;
4122 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4123
4124 /* sanity */
4125 if (!bp->func_stx) {
4126 BNX2X_ERR("BUG!\n");
4127 return;
4128 }
a2fbb9ea 4129
4130 bp->executer_idx = 0;
4131 memset(dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 4132
4133 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4134 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4135 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4136#ifdef __BIG_ENDIAN
4137 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4138#else
4139 DMAE_CMD_ENDIANITY_DW_SWAP |
4140#endif
4141 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4142 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4143 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4144 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4145 dmae->dst_addr_lo = bp->func_stx >> 2;
4146 dmae->dst_addr_hi = 0;
4147 dmae->len = sizeof(struct host_func_stats) >> 2;
4148 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4149 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4150 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 4151
4152 *stats_comp = 0;
4153}
a2fbb9ea 4154
4155static void bnx2x_stats_start(struct bnx2x *bp)
4156{
4157 if (bp->port.pmf)
4158 bnx2x_port_stats_init(bp);
4159
4160 else if (bp->func_stx)
4161 bnx2x_func_stats_init(bp);
4162
4163 bnx2x_hw_stats_post(bp);
4164 bnx2x_storm_stats_post(bp);
4165}
4166
4167static void bnx2x_stats_pmf_start(struct bnx2x *bp)
4168{
4169 bnx2x_stats_comp(bp);
4170 bnx2x_stats_pmf_update(bp);
4171 bnx2x_stats_start(bp);
4172}
4173
4174static void bnx2x_stats_restart(struct bnx2x *bp)
4175{
4176 bnx2x_stats_comp(bp);
4177 bnx2x_stats_start(bp);
4178}
4179
4180static void bnx2x_bmac_stats_update(struct bnx2x *bp)
4181{
4182 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
4183 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 4184 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4185 struct {
4186 u32 lo;
4187 u32 hi;
4188 } diff;
4189
4190 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
4191 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
4192 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
4193 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
4194 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
4195 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 4196 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
bb2a0f7a 4197 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
de832a55 4198 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
4199 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
4200 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
4201 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
4202 UPDATE_STAT64(tx_stat_gt127,
4203 tx_stat_etherstatspkts65octetsto127octets);
4204 UPDATE_STAT64(tx_stat_gt255,
4205 tx_stat_etherstatspkts128octetsto255octets);
4206 UPDATE_STAT64(tx_stat_gt511,
4207 tx_stat_etherstatspkts256octetsto511octets);
4208 UPDATE_STAT64(tx_stat_gt1023,
4209 tx_stat_etherstatspkts512octetsto1023octets);
4210 UPDATE_STAT64(tx_stat_gt1518,
4211 tx_stat_etherstatspkts1024octetsto1522octets);
4212 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
4213 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
4214 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
4215 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
4216 UPDATE_STAT64(tx_stat_gterr,
4217 tx_stat_dot3statsinternalmactransmiterrors);
4218 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
4219
4220 estats->pause_frames_received_hi =
4221 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
4222 estats->pause_frames_received_lo =
4223 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
4224
4225 estats->pause_frames_sent_hi =
4226 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
4227 estats->pause_frames_sent_lo =
4228 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
4229}
4230
4231static void bnx2x_emac_stats_update(struct bnx2x *bp)
4232{
4233 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
4234 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 4235 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4236
4237 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
4238 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
4239 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
4240 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
4241 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
4242 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
4243 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
4244 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
4245 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
4246 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
4247 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
4248 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
4249 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
4250 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
4251 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
4252 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
4253 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
4254 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
4255 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
4256 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
4257 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
4258 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
4259 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
4260 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
4261 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
4262 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
4263 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
4264 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
4265 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
4266 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
4267 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
4268
4269 estats->pause_frames_received_hi =
4270 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
4271 estats->pause_frames_received_lo =
4272 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
4273 ADD_64(estats->pause_frames_received_hi,
4274 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
4275 estats->pause_frames_received_lo,
4276 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
4277
4278 estats->pause_frames_sent_hi =
4279 pstats->mac_stx[1].tx_stat_outxonsent_hi;
4280 estats->pause_frames_sent_lo =
4281 pstats->mac_stx[1].tx_stat_outxonsent_lo;
4282 ADD_64(estats->pause_frames_sent_hi,
4283 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
4284 estats->pause_frames_sent_lo,
4285 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
4286}
4287
4288static int bnx2x_hw_stats_update(struct bnx2x *bp)
4289{
4290 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
4291 struct nig_stats *old = &(bp->port.old_nig_stats);
4292 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4293 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4294 struct {
4295 u32 lo;
4296 u32 hi;
4297 } diff;
4298
4299 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
4300 bnx2x_bmac_stats_update(bp);
4301
4302 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
4303 bnx2x_emac_stats_update(bp);
4304
4305 else { /* unreached */
c3eefaf6 4306 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
4307 return -1;
4308 }
a2fbb9ea 4309
4310 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
4311 new->brb_discard - old->brb_discard);
4312 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
4313 new->brb_truncate - old->brb_truncate);
a2fbb9ea 4314
4315 UPDATE_STAT64_NIG(egress_mac_pkt0,
4316 etherstatspkts1024octetsto1522octets);
4317 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 4318
bb2a0f7a 4319 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 4320
4321 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
4322 sizeof(struct mac_stx));
4323 estats->brb_drop_hi = pstats->brb_drop_hi;
4324 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 4325
bb2a0f7a 4326 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 4327
4328 if (!BP_NOMCP(bp)) {
4329 u32 nig_timer_max =
4330 SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
4331 if (nig_timer_max != estats->nig_timer_max) {
4332 estats->nig_timer_max = nig_timer_max;
4333 BNX2X_ERR("NIG timer max (%u)\n",
4334 estats->nig_timer_max);
4335 }
4336 }
4337
bb2a0f7a 4338 return 0;
4339}
4340
bb2a0f7a 4341static int bnx2x_storm_stats_update(struct bnx2x *bp)
4342{
4343 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
bb2a0f7a 4344 struct tstorm_per_port_stats *tport =
de832a55 4345 &stats->tstorm_common.port_statistics;
4346 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
4347 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4348 int i;
4349
4350 memcpy(&(fstats->total_bytes_received_hi),
4351 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
4352 sizeof(struct host_func_stats) - 2*sizeof(u32));
4353 estats->error_bytes_received_hi = 0;
4354 estats->error_bytes_received_lo = 0;
4355 estats->etherstatsoverrsizepkts_hi = 0;
4356 estats->etherstatsoverrsizepkts_lo = 0;
4357 estats->no_buff_discard_hi = 0;
4358 estats->no_buff_discard_lo = 0;
a2fbb9ea 4359
54b9ddaa 4360 for_each_queue(bp, i) {
4361 struct bnx2x_fastpath *fp = &bp->fp[i];
4362 int cl_id = fp->cl_id;
4363 struct tstorm_per_client_stats *tclient =
4364 &stats->tstorm_common.client_statistics[cl_id];
4365 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4366 struct ustorm_per_client_stats *uclient =
4367 &stats->ustorm_common.client_statistics[cl_id];
4368 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4369 struct xstorm_per_client_stats *xclient =
4370 &stats->xstorm_common.client_statistics[cl_id];
4371 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4372 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4373 u32 diff;
4374
4375 /* are storm stats valid? */
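 /* (bnx2x_storm_stats_post() put the pre-increment bp->stats_counter
 * into the ramrod as drv_counter, and the storms write back the
 * drv_counter of the query they last served, so "echoed + 1 ==
 * bp->stats_counter" means the snapshot answers the most recently
 * posted query; anything else below is treated as stale.)
 */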
4376 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
bb2a0f7a 4377 bp->stats_counter) {
4378 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4379 " xstorm counter (%d) != stats_counter (%d)\n",
4380 i, xclient->stats_counter, bp->stats_counter);
4381 return -1;
4382 }
4383 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
bb2a0f7a 4384 bp->stats_counter) {
4385 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4386 " tstorm counter (%d) != stats_counter (%d)\n",
4387 i, tclient->stats_counter, bp->stats_counter);
4388 return -2;
4389 }
4390 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4391 bp->stats_counter) {
4392 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4393 " ustorm counter (%d) != stats_counter (%d)\n",
4394 i, uclient->stats_counter, bp->stats_counter);
4395 return -4;
4396 }
a2fbb9ea 4397
de832a55 4398 qstats->total_bytes_received_hi =
ca00392c 4399 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
de832a55 4400 qstats->total_bytes_received_lo =
4401 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4402
4403 ADD_64(qstats->total_bytes_received_hi,
4404 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4405 qstats->total_bytes_received_lo,
4406 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4407
4408 ADD_64(qstats->total_bytes_received_hi,
4409 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4410 qstats->total_bytes_received_lo,
4411 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4412
4413 qstats->valid_bytes_received_hi =
4414 qstats->total_bytes_received_hi;
de832a55 4415 qstats->valid_bytes_received_lo =
ca00392c 4416 qstats->total_bytes_received_lo;
bb2a0f7a 4417
de832a55 4418 qstats->error_bytes_received_hi =
bb2a0f7a 4419 le32_to_cpu(tclient->rcv_error_bytes.hi);
de832a55 4420 qstats->error_bytes_received_lo =
bb2a0f7a 4421 le32_to_cpu(tclient->rcv_error_bytes.lo);
bb2a0f7a 4422
4423 ADD_64(qstats->total_bytes_received_hi,
4424 qstats->error_bytes_received_hi,
4425 qstats->total_bytes_received_lo,
4426 qstats->error_bytes_received_lo);
4427
4428 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4429 total_unicast_packets_received);
4430 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4431 total_multicast_packets_received);
4432 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4433 total_broadcast_packets_received);
4434 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4435 etherstatsoverrsizepkts);
4436 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4437
4438 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4439 total_unicast_packets_received);
4440 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4441 total_multicast_packets_received);
4442 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4443 total_broadcast_packets_received);
4444 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4445 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4446 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4447
4448 qstats->total_bytes_transmitted_hi =
ca00392c 4449 le32_to_cpu(xclient->unicast_bytes_sent.hi);
de832a55 4450 qstats->total_bytes_transmitted_lo =
4451 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4452
4453 ADD_64(qstats->total_bytes_transmitted_hi,
4454 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4455 qstats->total_bytes_transmitted_lo,
4456 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4457
4458 ADD_64(qstats->total_bytes_transmitted_hi,
4459 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4460 qstats->total_bytes_transmitted_lo,
4461 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
bb2a0f7a 4462
4463 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4464 total_unicast_packets_transmitted);
4465 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4466 total_multicast_packets_transmitted);
4467 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4468 total_broadcast_packets_transmitted);
4469
4470 old_tclient->checksum_discard = tclient->checksum_discard;
4471 old_tclient->ttl0_discard = tclient->ttl0_discard;
4472
4473 ADD_64(fstats->total_bytes_received_hi,
4474 qstats->total_bytes_received_hi,
4475 fstats->total_bytes_received_lo,
4476 qstats->total_bytes_received_lo);
4477 ADD_64(fstats->total_bytes_transmitted_hi,
4478 qstats->total_bytes_transmitted_hi,
4479 fstats->total_bytes_transmitted_lo,
4480 qstats->total_bytes_transmitted_lo);
4481 ADD_64(fstats->total_unicast_packets_received_hi,
4482 qstats->total_unicast_packets_received_hi,
4483 fstats->total_unicast_packets_received_lo,
4484 qstats->total_unicast_packets_received_lo);
4485 ADD_64(fstats->total_multicast_packets_received_hi,
4486 qstats->total_multicast_packets_received_hi,
4487 fstats->total_multicast_packets_received_lo,
4488 qstats->total_multicast_packets_received_lo);
4489 ADD_64(fstats->total_broadcast_packets_received_hi,
4490 qstats->total_broadcast_packets_received_hi,
4491 fstats->total_broadcast_packets_received_lo,
4492 qstats->total_broadcast_packets_received_lo);
4493 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4494 qstats->total_unicast_packets_transmitted_hi,
4495 fstats->total_unicast_packets_transmitted_lo,
4496 qstats->total_unicast_packets_transmitted_lo);
4497 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4498 qstats->total_multicast_packets_transmitted_hi,
4499 fstats->total_multicast_packets_transmitted_lo,
4500 qstats->total_multicast_packets_transmitted_lo);
4501 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4502 qstats->total_broadcast_packets_transmitted_hi,
4503 fstats->total_broadcast_packets_transmitted_lo,
4504 qstats->total_broadcast_packets_transmitted_lo);
4505 ADD_64(fstats->valid_bytes_received_hi,
4506 qstats->valid_bytes_received_hi,
4507 fstats->valid_bytes_received_lo,
4508 qstats->valid_bytes_received_lo);
4509
4510 ADD_64(estats->error_bytes_received_hi,
4511 qstats->error_bytes_received_hi,
4512 estats->error_bytes_received_lo,
4513 qstats->error_bytes_received_lo);
4514 ADD_64(estats->etherstatsoverrsizepkts_hi,
4515 qstats->etherstatsoverrsizepkts_hi,
4516 estats->etherstatsoverrsizepkts_lo,
4517 qstats->etherstatsoverrsizepkts_lo);
4518 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4519 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4520 }
4521
4522 ADD_64(fstats->total_bytes_received_hi,
4523 estats->rx_stat_ifhcinbadoctets_hi,
4524 fstats->total_bytes_received_lo,
4525 estats->rx_stat_ifhcinbadoctets_lo);
4526
4527 memcpy(estats, &(fstats->total_bytes_received_hi),
4528 sizeof(struct host_func_stats) - 2*sizeof(u32));
4529
4530 ADD_64(estats->etherstatsoverrsizepkts_hi,
4531 estats->rx_stat_dot3statsframestoolong_hi,
4532 estats->etherstatsoverrsizepkts_lo,
4533 estats->rx_stat_dot3statsframestoolong_lo);
4534 ADD_64(estats->error_bytes_received_hi,
4535 estats->rx_stat_ifhcinbadoctets_hi,
4536 estats->error_bytes_received_lo,
4537 estats->rx_stat_ifhcinbadoctets_lo);
4538
4539 if (bp->port.pmf) {
4540 estats->mac_filter_discard =
4541 le32_to_cpu(tport->mac_filter_discard);
4542 estats->xxoverflow_discard =
4543 le32_to_cpu(tport->xxoverflow_discard);
4544 estats->brb_truncate_discard =
bb2a0f7a 4545 le32_to_cpu(tport->brb_truncate_discard);
4546 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4547 }
4548
4549 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
a2fbb9ea 4550
4551 bp->stats_pending = 0;
4552
4553 return 0;
4554}
4555
bb2a0f7a 4556static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 4557{
bb2a0f7a 4558 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 4559 struct net_device_stats *nstats = &bp->dev->stats;
de832a55 4560 int i;
4561
4562 nstats->rx_packets =
4563 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4564 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4565 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4566
4567 nstats->tx_packets =
4568 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4569 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4570 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4571
de832a55 4572 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
a2fbb9ea 4573
0e39e645 4574 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 4575
de832a55 4576 nstats->rx_dropped = estats->mac_discard;
54b9ddaa 4577 for_each_queue(bp, i)
4578 nstats->rx_dropped +=
4579 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4580
4581 nstats->tx_dropped = 0;
4582
4583 nstats->multicast =
de832a55 4584 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
a2fbb9ea 4585
bb2a0f7a 4586 nstats->collisions =
de832a55 4587 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4588
4589 nstats->rx_length_errors =
4590 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4591 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4592 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4593 bnx2x_hilo(&estats->brb_truncate_hi);
4594 nstats->rx_crc_errors =
4595 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4596 nstats->rx_frame_errors =
4597 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4598 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4599 nstats->rx_missed_errors = estats->xxoverflow_discard;
4600
4601 nstats->rx_errors = nstats->rx_length_errors +
4602 nstats->rx_over_errors +
4603 nstats->rx_crc_errors +
4604 nstats->rx_frame_errors +
4605 nstats->rx_fifo_errors +
4606 nstats->rx_missed_errors;
a2fbb9ea 4607
bb2a0f7a 4608 nstats->tx_aborted_errors =
4609 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4610 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4611 nstats->tx_carrier_errors =
4612 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4613 nstats->tx_fifo_errors = 0;
4614 nstats->tx_heartbeat_errors = 0;
4615 nstats->tx_window_errors = 0;
4616
4617 nstats->tx_errors = nstats->tx_aborted_errors +
4618 nstats->tx_carrier_errors +
4619 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4620}
4621
4622static void bnx2x_drv_stats_update(struct bnx2x *bp)
4623{
4624 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4625 int i;
4626
4627 estats->driver_xoff = 0;
4628 estats->rx_err_discard_pkt = 0;
4629 estats->rx_skb_alloc_failed = 0;
4630 estats->hw_csum_err = 0;
54b9ddaa 4631 for_each_queue(bp, i) {
4632 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4633
4634 estats->driver_xoff += qstats->driver_xoff;
4635 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4636 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4637 estats->hw_csum_err += qstats->hw_csum_err;
4638 }
4639}
4640
bb2a0f7a 4641static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 4642{
bb2a0f7a 4643 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4644
4645 if (*stats_comp != DMAE_COMP_VAL)
4646 return;
4647
4648 if (bp->port.pmf)
de832a55 4649 bnx2x_hw_stats_update(bp);
a2fbb9ea 4650
4651 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4652 BNX2X_ERR("storm stats were not updated for 3 times\n");
4653 bnx2x_panic();
4654 return;
4655 }
4656
4657 bnx2x_net_stats_update(bp);
4658 bnx2x_drv_stats_update(bp);
4659
7995c64e 4660 if (netif_msg_timer(bp)) {
ca00392c 4661 struct bnx2x_fastpath *fp0_rx = bp->fp;
54b9ddaa 4662 struct bnx2x_fastpath *fp0_tx = bp->fp;
4663 struct tstorm_per_client_stats *old_tclient =
4664 &bp->fp->old_tclient;
4665 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
bb2a0f7a 4666 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 4667 struct net_device_stats *nstats = &bp->dev->stats;
34f80b04 4668 int i;
a2fbb9ea 4669
7995c64e 4670 netdev_printk(KERN_DEBUG, bp->dev, "\n");
4671 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4672 " tx pkt (%lx)\n",
4673 bnx2x_tx_avail(fp0_tx),
4674 le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4675 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4676 " rx pkt (%lx)\n",
4677 (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4678 fp0_rx->rx_comp_cons),
4679 le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4680 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4681 "brb truncate %u\n",
4682 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4683 qstats->driver_xoff,
4684 estats->brb_drop_lo, estats->brb_truncate_lo);
a2fbb9ea 4685 printk(KERN_DEBUG "tstats: checksum_discard %u "
de832a55 4686 "packets_too_big_discard %lu no_buff_discard %lu "
4687 "mac_discard %u mac_filter_discard %u "
4688 "xxovrflow_discard %u brb_truncate_discard %u "
4689 "ttl0_discard %u\n",
4781bfad 4690 le32_to_cpu(old_tclient->checksum_discard),
4691 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4692 bnx2x_hilo(&qstats->no_buff_discard_hi),
4693 estats->mac_discard, estats->mac_filter_discard,
4694 estats->xxoverflow_discard, estats->brb_truncate_discard,
4781bfad 4695 le32_to_cpu(old_tclient->ttl0_discard));
4696
4697 for_each_queue(bp, i) {
4698 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4699 bnx2x_fp(bp, i, tx_pkt),
4700 bnx2x_fp(bp, i, rx_pkt),
4701 bnx2x_fp(bp, i, rx_calls));
4702 }
4703 }
4704
4705 bnx2x_hw_stats_post(bp);
4706 bnx2x_storm_stats_post(bp);
4707}
a2fbb9ea 4708
4709static void bnx2x_port_stats_stop(struct bnx2x *bp)
4710{
4711 struct dmae_command *dmae;
4712 u32 opcode;
4713 int loader_idx = PMF_DMAE_C(bp);
4714 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4715
bb2a0f7a 4716 bp->executer_idx = 0;
a2fbb9ea 4717
4718 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4719 DMAE_CMD_C_ENABLE |
4720 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 4721#ifdef __BIG_ENDIAN
bb2a0f7a 4722 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 4723#else
bb2a0f7a 4724 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 4725#endif
4726 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4727 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4728
4729 if (bp->port.port_stx) {
4730
4731 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4732 if (bp->func_stx)
4733 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4734 else
4735 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4736 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4737 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4738 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 4739 dmae->dst_addr_hi = 0;
4740 dmae->len = sizeof(struct host_port_stats) >> 2;
4741 if (bp->func_stx) {
4742 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4743 dmae->comp_addr_hi = 0;
4744 dmae->comp_val = 1;
4745 } else {
4746 dmae->comp_addr_lo =
4747 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4748 dmae->comp_addr_hi =
4749 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4750 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 4751
4752 *stats_comp = 0;
4753 }
4754 }
4755
4756 if (bp->func_stx) {
4757
4758 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4759 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4760 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4761 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4762 dmae->dst_addr_lo = bp->func_stx >> 2;
4763 dmae->dst_addr_hi = 0;
4764 dmae->len = sizeof(struct host_func_stats) >> 2;
4765 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4766 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4767 dmae->comp_val = DMAE_COMP_VAL;
4768
4769 *stats_comp = 0;
a2fbb9ea 4770 }
4771}
4772
4773static void bnx2x_stats_stop(struct bnx2x *bp)
4774{
4775 int update = 0;
4776
4777 bnx2x_stats_comp(bp);
4778
4779 if (bp->port.pmf)
4780 update = (bnx2x_hw_stats_update(bp) == 0);
4781
4782 update |= (bnx2x_storm_stats_update(bp) == 0);
4783
4784 if (update) {
4785 bnx2x_net_stats_update(bp);
a2fbb9ea 4786
4787 if (bp->port.pmf)
4788 bnx2x_port_stats_stop(bp);
4789
4790 bnx2x_hw_stats_post(bp);
4791 bnx2x_stats_comp(bp);
4792 }
4793}
4794
4795static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4796{
4797}
4798
4799static const struct {
4800 void (*action)(struct bnx2x *bp);
4801 enum bnx2x_stats_state next_state;
4802} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4803/* state event */
4804{
4805/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4806/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4807/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4808/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4809},
4810{
4811/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4812/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4813/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4814/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4815}
4816};
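
/* Note on reading the table: the row is the current state and the column
 * the event. E.g. a LINK_UP event while DISABLED runs bnx2x_stats_start()
 * and moves the machine to STATS_STATE_ENABLED; a STOP event while
 * ENABLED runs bnx2x_stats_stop() and drops it back to
 * STATS_STATE_DISABLED.
 */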
4817
4818static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4819{
4820 enum bnx2x_stats_state state = bp->stats_state;
4821
4822 bnx2x_stats_stm[state][event].action(bp);
4823 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4824
4825 /* Make sure the state has been "changed" */
4826 smp_wmb();
4827
7995c64e 4828 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
4829 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4830 state, event, bp->stats_state);
4831}
4832
4833static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4834{
4835 struct dmae_command *dmae;
4836 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4837
4838 /* sanity */
4839 if (!bp->port.pmf || !bp->port.port_stx) {
4840 BNX2X_ERR("BUG!\n");
4841 return;
4842 }
4843
4844 bp->executer_idx = 0;
4845
4846 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4847 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4848 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4849 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4850#ifdef __BIG_ENDIAN
4851 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4852#else
4853 DMAE_CMD_ENDIANITY_DW_SWAP |
4854#endif
4855 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4856 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4857 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4858 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4859 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4860 dmae->dst_addr_hi = 0;
4861 dmae->len = sizeof(struct host_port_stats) >> 2;
4862 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4863 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4864 dmae->comp_val = DMAE_COMP_VAL;
4865
4866 *stats_comp = 0;
4867 bnx2x_hw_stats_post(bp);
4868 bnx2x_stats_comp(bp);
4869}
4870
4871static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4872{
4873 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4874 int port = BP_PORT(bp);
4875 int func;
4876 u32 func_stx;
4877
4878 /* sanity */
4879 if (!bp->port.pmf || !bp->func_stx) {
4880 BNX2X_ERR("BUG!\n");
4881 return;
4882 }
4883
4884 /* save our func_stx */
4885 func_stx = bp->func_stx;
4886
4887 for (vn = VN_0; vn < vn_max; vn++) {
4888 func = 2*vn + port;
4889
4890 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4891 bnx2x_func_stats_init(bp);
4892 bnx2x_hw_stats_post(bp);
4893 bnx2x_stats_comp(bp);
4894 }
4895
4896 /* restore our func_stx */
4897 bp->func_stx = func_stx;
4898}
4899
4900static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4901{
4902 struct dmae_command *dmae = &bp->stats_dmae;
4903 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4904
4905 /* sanity */
4906 if (!bp->func_stx) {
4907 BNX2X_ERR("BUG!\n");
4908 return;
4909 }
4910
4911 bp->executer_idx = 0;
4912 memset(dmae, 0, sizeof(struct dmae_command));
4913
4914 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4915 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4916 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4917#ifdef __BIG_ENDIAN
4918 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4919#else
4920 DMAE_CMD_ENDIANITY_DW_SWAP |
4921#endif
4922 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4923 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4924 dmae->src_addr_lo = bp->func_stx >> 2;
4925 dmae->src_addr_hi = 0;
4926 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4927 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4928 dmae->len = sizeof(struct host_func_stats) >> 2;
4929 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4930 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4931 dmae->comp_val = DMAE_COMP_VAL;
4932
4933 *stats_comp = 0;
4934 bnx2x_hw_stats_post(bp);
4935 bnx2x_stats_comp(bp);
4936}
4937
4938static void bnx2x_stats_init(struct bnx2x *bp)
4939{
4940 int port = BP_PORT(bp);
4941 int func = BP_FUNC(bp);
4942 int i;
4943
4944 bp->stats_pending = 0;
4945 bp->executer_idx = 0;
4946 bp->stats_counter = 0;
4947
4948 /* port and func stats for management */
4949 if (!BP_NOMCP(bp)) {
4950 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4951 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4952
4953 } else {
4954 bp->port.port_stx = 0;
4955 bp->func_stx = 0;
4956 }
4957 DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
4958 bp->port.port_stx, bp->func_stx);
4959
4960 /* port stats */
4961 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4962 bp->port.old_nig_stats.brb_discard =
4963 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4964 bp->port.old_nig_stats.brb_truncate =
4965 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4966 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4967 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4968 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4969 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4970
4971 /* function stats */
4972 for_each_queue(bp, i) {
4973 struct bnx2x_fastpath *fp = &bp->fp[i];
4974
4975 memset(&fp->old_tclient, 0,
4976 sizeof(struct tstorm_per_client_stats));
4977 memset(&fp->old_uclient, 0,
4978 sizeof(struct ustorm_per_client_stats));
4979 memset(&fp->old_xclient, 0,
4980 sizeof(struct xstorm_per_client_stats));
4981 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4982 }
4983
4984 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4985 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4986
4987 bp->stats_state = STATS_STATE_DISABLED;
4988
4989 if (bp->port.pmf) {
4990 if (bp->port.port_stx)
4991 bnx2x_port_stats_base_init(bp);
4992
4993 if (bp->func_stx)
4994 bnx2x_func_stats_base_init(bp);
4995
4996 } else if (bp->func_stx)
4997 bnx2x_func_stats_base_update(bp);
4998}
4999
5000static void bnx2x_timer(unsigned long data)
5001{
5002 struct bnx2x *bp = (struct bnx2x *) data;
5003
5004 if (!netif_running(bp->dev))
5005 return;
5006
5007 if (atomic_read(&bp->intr_sem) != 0)
f1410647 5008 goto timer_restart;
5009
5010 if (poll) {
5011 struct bnx2x_fastpath *fp = &bp->fp[0];
5012 int rc;
5013
7961f791 5014 bnx2x_tx_int(fp);
5015 rc = bnx2x_rx_int(fp, 1000);
5016 }
5017
5018 if (!BP_NOMCP(bp)) {
5019 int func = BP_FUNC(bp);
5020 u32 drv_pulse;
5021 u32 mcp_pulse;
5022
5023 ++bp->fw_drv_pulse_wr_seq;
5024 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5025 /* TBD - add SYSTEM_TIME */
5026 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 5027 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 5028
34f80b04 5029 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
5030 MCP_PULSE_SEQ_MASK);
5031 /* The delta between driver pulse and mcp response
5032 * should be 1 (before mcp response) or 0 (after mcp response)
5033 */
5034 if ((drv_pulse != mcp_pulse) &&
5035 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
5036 /* someone lost a heartbeat... */
5037 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5038 drv_pulse, mcp_pulse);
5039 }
5040 }
5041
f34d28ea 5042 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a 5043 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 5044
f1410647 5045timer_restart:
a2fbb9ea
ET
5046 mod_timer(&bp->timer, jiffies + bp->current_interval);
5047}
5048
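/* Heartbeat example for bnx2x_timer() above (illustrative values): if the
 * driver just wrote drv_pulse = 0x42, a healthy MCP reads back as
 * mcp_pulse = 0x42 (response already seen) or 0x41 (response still
 * pending); any other value triggers the "lost heartbeat" error.
 */
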
/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	/* "CSTORM" */
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
}

static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

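/* Note on bnx2x_init_sb() above: both the USTORM and CSTORM sections of a
 * fastpath status block are hosted in CSTORM internal memory (hence all
 * the BAR_CSTRORM_INTMEM writes), and every host-coalescing index starts
 * out written as 1, i.e. disabled, until bnx2x_update_coalesce() later
 * re-enables the indices that are actually used.
 */
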
static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_u)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_c)/4);
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}

static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						      reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

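/* Note on bnx2x_init_def_sb() above: the default status block carries the
 * attention bits plus one index block per storm (U/C/T/X).  The attn_group
 * signatures are cached copies of the four 32-bit AEU enable registers of
 * each of the MAX_DYNAMIC_ATTN_GRPS groups, read at 0x10-byte strides from
 * reg_offset.
 */
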
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
						      U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/(4 * BNX2X_BTR));
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
						       U_SB_ETH_RX_CQ_INDEX),
			 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
						      C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/(4 * BNX2X_BTR));
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
						       C_SB_ETH_TX_CQ_INDEX),
			 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
	}
}

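/* Coalescing arithmetic (illustrative; assuming BNX2X_BTR == 4 purely for
 * the example): with rx_ticks == 48 the timeout written above would be
 * 48/(4*4) = 3 timer units, while any rx_ticks below 4*BNX2X_BTR yields 0
 * and the index is disabled (HC_DISABLE set to 1) instead of firing on
 * every event.
 */
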
static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, DMA_FROM_DEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}

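/* Note on bnx2x_free_tpa_pool() above: only bins still in BNX2X_TPA_START
 * state hold an active DMA mapping (an aggregation was started but never
 * completed), so only those are unmapped before the skb is freed.
 */
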
static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod, cqe_ring_prod;
	int i, j;

	bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
	DP(NETIF_MSG_IFUP,
	   "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);

	if (bp->flags & TPA_ENABLE_FLAG) {

		for_each_queue(bp, j) {
			struct bnx2x_fastpath *fp = &bp->fp[j];

			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				dma_unmap_addr_set((struct sw_rx_bd *)
						       &bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs on queue[%d]\n", i, j);
				fp->eth_q_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
				       cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}

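/* Ring layout note for bnx2x_init_rx_rings() above: the last two entries
 * of every BD/SGE page are reserved as "next page" pointers, which is why
 * the chaining loops index RX_DESC_CNT * i - 2 (and RCQ_DESC_CNT * i - 1
 * for the completion ring, where a single CQE slot is large enough to
 * hold the next-page element).
 */
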
static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_next_bd *tx_next_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

			tx_next_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_next_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
		fp->tx_db.data.zero_fill1 = 0;
		fp->tx_db.data.prod = 0;

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}

static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	/* Rx */
	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = fp->sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
					 (u32)0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);

			context->ustorm_st_context.common.max_sges_for_packet =
				SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
			context->ustorm_st_context.common.max_sges_for_packet =
				((context->ustorm_st_context.common.
				  max_sges_for_packet + PAGES_PER_SGE - 1) &
				 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
		}

		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_context *context =
			bnx2x_sp(bp, context[i].eth);

		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = fp->sb_id;

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.statistics_data = (fp->cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
	}
}

static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_queues));
}

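/* Indirection example for bnx2x_init_ind_table() above: with
 * num_queues == 4 and a leading cl_id of 0, the table is filled with the
 * repeating pattern 0,1,2,3,0,1,... for all TSTORM_INDIRECTION_TABLE_SIZE
 * entries, spreading RSS hash results round robin across the client IDs.
 */
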
static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}

static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = bp->rx_mode_cl_mask;
	int func = BP_FUNC(bp);
	int port = BP_PORT(bp);
	int i;
	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	REG_WR(bp,
	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
	       llh_mask);

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}

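/* Filter summary for bnx2x_set_storm_rx_mode() above:
 *   NONE     - drop_all set for ucast/mcast/bcast
 *   NORMAL   - accept_all only for bcast (ucast/mcast are presumably
 *              matched by the MAC/multicast filters configured elsewhere)
 *   ALLMULTI - accept_all for mcast and bcast
 *   PROMISC  - accept_all for all three, plus management unicast in llh_mask
 */
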
static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i, j;
	u32 offset;
	u16 max_agg_size;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS(bp);
		tstorm_config.rss_result_mask = MULTI_MASK;
	}

	/* Enable TPA if needed */
	if (bp->flags & TPA_ENABLE_FLAG)
		tstorm_config.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
	bnx2x_set_storm_rx_mode(bp);

	for_each_queue(bp, i) {
		u8 cl_id = bp->fp[i].cl_id;

		/* reset xstorm per client statistics */
		offset = BAR_XSTRORM_INTMEM +
			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset tstorm per client statistics */
		offset = BAR_TSTRORM_INTMEM +
			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset ustorm per client statistics */
		offset = BAR_USTRORM_INTMEM +
			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size =
		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
			  SGE_PAGE_SIZE * PAGES_PER_SGE),
		    (u32)0xffff);
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping));

		/* Next page */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
			 max_agg_size);
	}

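	/* max_agg_size arithmetic (illustrative; assuming 4K SGE pages and
	 * PAGES_PER_SGE == 2 purely for the example):
	 * min(8, MAX_SKB_FRAGS) * 4096 * 2 = 65536, which the (u32)0xffff
	 * cap then clamps to 65535 bytes per aggregated frame.
	 */
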
	/* dropless flow control */
	if (CHIP_IS_E1H(bp)) {
		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.cos = 1;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for_each_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];

			if (!fp->disable_tpa) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;
			}

			offset = BAR_USTRORM_INTMEM +
				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
								   fp->cl_id);
			for (j = 0;
			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
			     j++)
				REG_WR(bp, offset + j*4,
				       ((u32 *)&rx_pause)[j]);
		}
	}

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Init rate shaping and fairness contexts */
	if (IS_E1HMF(bp)) {
		int vn;

		/* During init there is no active link
		   Until link is up, set link rate to 10Gbps */
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);

		if (!BP_NOMCP(bp))
			bp->mf_config =
			      SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		bnx2x_calc_vn_weight_sum(bp);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);

		/* Enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;

	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode minmax will be disabled\n");
	}

	/* Store it to internal memory */
	if (bp->port.pmf)
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}

static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}

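/* Note: the switch in bnx2x_init_internal() above falls through on
 * purpose - a COMMON load implies PORT and FUNCTION init, and a PORT load
 * implies FUNCTION init, so each case simply adds one more scope.
 */
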
static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
#ifdef BCM_CNIC
		fp->sb_id = fp->cl_id + 1;
#else
		fp->sb_id = fp->cl_id;
#endif
		DP(NETIF_MSG_IFUP,
		   "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	/* ensure status block indices were read */
	rmb();

	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}

/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
					    &bp->gunzip_mapping, GFP_KERNEL);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
			  bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	netdev_err(bp->dev, "Cannot allocate firmware buffer for un-compression\n");
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
				  bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	n = 10;

#define FNAME				0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		netdev_err(bp->dev, "Firmware decompression error: %s\n",
			   bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		netdev_err(bp->dev, "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
			   bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}

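/* Format note for bnx2x_gunzip() above: a gzip member starts with a
 * 10-byte fixed header (magic 0x1f 0x8b, method, flags, ...); when the
 * FNAME flag (bit 3) is set it is followed by a NUL-terminated file name,
 * which the while loop skips.  Passing -MAX_WBITS to zlib_inflateInit2()
 * selects raw deflate, since the gzip wrapper has been consumed by hand.
 */
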
/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}

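/* Counting note for bnx2x_int_mem_test() above: the NIG statistic counts
 * octets and each debug packet is 0x10 bytes, so one packet reads back as
 * 0x10 and the "10 + 1" packets of part2 read back as 11 * 0x10 = 0xb0.
 */
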
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}

static const struct {
	u32 addr;
	u32 mask;
} bnx2x_parity_mask[] = {
	{PXP_REG_PXP_PRTY_MASK, 0xffffffff},
	{PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
	{PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
	{HC_REG_HC_PRTY_MASK, 0xffffffff},
	{MISC_REG_MISC_PRTY_MASK, 0xffffffff},
	{QM_REG_QM_PRTY_MASK, 0x0},
	{DORQ_REG_DORQ_PRTY_MASK, 0x0},
	{GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
	{GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
	{SRC_REG_SRC_PRTY_MASK, 0x4},		/* bit 2 */
	{CDU_REG_CDU_PRTY_MASK, 0x0},
	{CFC_REG_CFC_PRTY_MASK, 0x0},
	{DBG_REG_DBG_PRTY_MASK, 0x0},
	{DMAE_REG_DMAE_PRTY_MASK, 0x0},
	{BRB1_REG_BRB1_PRTY_MASK, 0x0},
	{PRS_REG_PRS_PRTY_MASK, (1<<6)},	/* bit 6 */
	{TSDM_REG_TSDM_PRTY_MASK, 0x18},	/* bit 3,4 */
	{CSDM_REG_CSDM_PRTY_MASK, 0x8},		/* bit 3 */
	{USDM_REG_USDM_PRTY_MASK, 0x38},	/* bit 3,4,5 */
	{XSDM_REG_XSDM_PRTY_MASK, 0x8},		/* bit 3 */
	{TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
	{TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
	{USEM_REG_USEM_PRTY_MASK_0, 0x0},
	{USEM_REG_USEM_PRTY_MASK_1, 0x0},
	{CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
	{CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
	{XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
	{XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
};

static void enable_blocks_parity(struct bnx2x *bp)
{
	int i, mask_arr_len =
		sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0]));

	for (i = 0; i < mask_arr_len; i++)
		REG_WR(bp, bnx2x_parity_mask[i].addr,
		       bnx2x_parity_mask[i].mask);
}
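
/* Mask semantics (as used above): bits set in a PRTY_MASK register
 * presumably keep the corresponding parity source masked, so 0xffffffff
 * leaves a block fully masked, 0x0 unmasks everything, and values such as
 * 0x18 keep only bits 3 and 4 masked.
 */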

static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}

static void bnx2x_init_pxp(struct bnx2x *bp)
{
	u16 devctl;
	int r_order, w_order;

	pci_read_config_word(bp->pdev,
			     bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
	w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
	if (bp->mrrs == -1)
		r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
	else {
		DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
		r_order = bp->mrrs;
	}

	bnx2x_init_pxp_arb(bp, r_order, w_order);
}

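/* devctl example for bnx2x_init_pxp() above: PCI_EXP_DEVCTL_PAYLOAD
 * occupies bits 7:5 and PCI_EXP_DEVCTL_READRQ bits 14:12, both encoding
 * 128 << n bytes.  A devctl of 0x1020 therefore gives w_order = 1 and
 * r_order = 1, i.e. a 256-byte max payload and 256-byte max read request.
 */
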
static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	int is_required;
	u32 val;
	int port;

	if (BP_NOMCP(bp))
		return;

	is_required = 0;
	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			u32 phy_type =
				SHMEM_RD(bp, dev_info.port_hw_config[port].
					 external_phy_config) &
				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			is_required |=
				((phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}

34f80b04 6445static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 6446{
a2fbb9ea 6447 u32 val, i;
37b091ba
MC
6448#ifdef BCM_CNIC
6449 u32 wb_write[2];
6450#endif
a2fbb9ea 6451
34f80b04 6452 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 6453
81f75bbf 6454 bnx2x_reset_common(bp);
34f80b04
EG
6455 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6456 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 6457
94a78b79 6458 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
34f80b04
EG
6459 if (CHIP_IS_E1H(bp))
6460 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 6461
34f80b04
EG
6462 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6463 msleep(30);
6464 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 6465
94a78b79 6466 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
34f80b04
EG
6467 if (CHIP_IS_E1(bp)) {
6468 /* enable HW interrupt from PXP on USDM overflow
6469 bit 16 on INT_MASK_0 */
6470 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6471 }
a2fbb9ea 6472
94a78b79 6473 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
34f80b04 6474 bnx2x_init_pxp(bp);
a2fbb9ea
ET
6475
6476#ifdef __BIG_ENDIAN
34f80b04
EG
6477 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6478 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6479 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6480 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6481 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
6482 /* make sure this value is 0 */
6483 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
6484
6485/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6486 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6487 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6488 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6489 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
6490#endif
6491
34f80b04 6492 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
37b091ba 6493#ifdef BCM_CNIC
34f80b04
EG
6494 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6495 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6496 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
a2fbb9ea
ET
6497#endif
6498
34f80b04
EG
6499 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6500 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 6501
34f80b04
EG
6502 /* let the HW do it's magic ... */
6503 msleep(100);
6504 /* finish PXP init */
6505 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6506 if (val != 1) {
6507 BNX2X_ERR("PXP2 CFG failed\n");
6508 return -EBUSY;
6509 }
6510 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6511 if (val != 1) {
6512 BNX2X_ERR("PXP2 RD_INIT failed\n");
6513 return -EBUSY;
6514 }
a2fbb9ea 6515
34f80b04
EG
6516 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6517 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 6518
94a78b79 6519 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
a2fbb9ea 6520
34f80b04
EG
6521 /* clean the DMAE memory */
6522 bp->dmae_ready = 1;
6523 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 6524
94a78b79
VZ
6525 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6526 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6527 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6528 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
a2fbb9ea 6529
34f80b04
EG
6530 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6531 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6532 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6533 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6534
94a78b79 6535 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
37b091ba
MC
6536
6537#ifdef BCM_CNIC
6538 wb_write[0] = 0;
6539 wb_write[1] = 0;
6540 for (i = 0; i < 64; i++) {
6541 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6542 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6543
6544 if (CHIP_IS_E1H(bp)) {
6545 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6546 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6547 wb_write, 2);
6548 }
6549 }
6550#endif
34f80b04
EG
6551 /* soft reset pulse */
6552 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6553 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea 6554
37b091ba 6555#ifdef BCM_CNIC
94a78b79 6556 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
a2fbb9ea 6557#endif
a2fbb9ea 6558
94a78b79 6559 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
34f80b04
EG
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));

	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
		REG_WR(bp, i, 0xc0cac01a);
		/* TODO: replace with something meaningful */
	}
	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
#ifdef BCM_CNIC
	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
#endif
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		pr_alert("please adjust the size of cdu_context(%ld)\n",
			 (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);

	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		bp->port.need_hw_lock = 1;
		break;

	default:
		break;
	}

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);
	if (CHIP_PARITY_SUPPORTED(bp))
		enable_blocks_parity(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - cannot initialize link\n");

	return 0;
}

static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

#ifdef BCM_CNIC
	REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);

	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif
	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
		/* no pause for emulation and FPGA */
		low = 0;
		high = 513;
	} else {
		if (IS_E1HMF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
	}
	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
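
	/* Worked example for the threshold math above (illustrative values
	 * only): in the non-MF, two-port case with mtu = 9000,
	 * low = 96 + 9000/64 + 1 = 237 BRB blocks, which is
	 * (24*1024 + 9000*4)/256 rounded up, and high = 237 + 56 = 293,
	 * i.e. a fixed 14 KB (56 blocks of 256 bytes) of pause headroom
	 * above the low threshold.
	 */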


	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		{
		u32 swap_val, swap_override, aeu_gpio_mask, offset;

		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
			       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);

		/* The GPIO should be swapped if the swap register is
		   set and active */
		swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
		swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);

		/* Select function upon port-swap configuration */
		if (port == 0) {
			offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
		} else {
			offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
		}
		val = REG_RD(bp, offset);
		/* add GPIO3 to group */
		val |= aeu_gpio_mask;
		REG_WR(bp, offset, val);
		}
		break;

	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		/* add SPIO 5 to group 0 */
		{
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
		}
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}

#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and has a 1=valid bit
   added at the 53rd bit; then, since this is a wide register(TM),
   we split it into two 32 bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)
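
/* Illustration with a hypothetical address: for x = 0x123456789000,
 * x >> 12 = 0x123456789, so ONCHIP_ADDR1(x) = 0x23456789 holds the low
 * 32 bits of the page-shifted address, while ONCHIP_ADDR2(x) =
 * (1 << 20) | 0x1 = 0x100001 carries the remaining high bits plus the
 * valid bit.
 */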

#ifdef BCM_CNIC
#define CNIC_ILT_LINES		127
#define CNIC_CTX_PER_ILT	16
#else
#define CNIC_ILT_LINES		0
#endif

static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else /* E1 */
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

#ifdef BCM_CNIC
	i += 1 + CNIC_ILT_LINES;
	bnx2x_ilt_wr(bp, i, bp->timers_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->qm_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->t1_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
	}

	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
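	/* 16 KB of T2 divided by 64-byte entries gives the 256 free
	   connection entries advertised to the searcher above */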

	bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
		    U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));

	bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
		    U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
		    U64_HI((u64)bp->t2_mapping + 16*1024 - 64));

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
#endif

	if (CHIP_IS_E1H(bp)) {
		bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}

static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	rc = bnx2x_gunzip_init(bp);
	if (rc)
		return rc;

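	/* The load_code cases cascade on purpose: a COMMON load also runs
	   the PORT and FUNCTION stages, and a PORT load also runs the
	   FUNCTION stage - hence the "no break" comments below */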
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#ifdef BCM_CNIC
	bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#endif

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}

static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			dma_free_coherent(&bp->pdev->dev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)
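
/* The do { ... } while (0) wrappers make these multi-statement helpers
 * behave as single statements, so e.g. BNX2X_FREE(x); is safe inside an
 * unbraced if/else */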

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {

		/* status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
	BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
		       sizeof(struct host_status_block));
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}

static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* allocate the searcher T2 table:
	   T2 is 1/4 the size of T1 (and is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 (for 1024 connections) */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
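	/* each 64-byte T2 entry thus stores, at offset 56, the DMA address
	   of the entry that follows it, forming the free list handed to the
	   searcher via SRC_REG_FIRSTFREE0/LASTFREE0 in bnx2x_init_func() */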

	/* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);

	BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
			sizeof(struct host_status_block));
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, DMA_FROM_DEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

#ifdef BCM_CNIC
	offset++;
#endif
	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
{
	if (bp->flags & USING_MSIX_FLAG) {
		if (!disable_only)
			bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else if (bp->flags & USING_MSI_FLAG) {
		if (!disable_only)
			free_irq(bp->pdev->irq, bp->dev);
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;

	} else if (!disable_only)
		free_irq(bp->pdev->irq, bp->dev);
}

static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset = 1;
	int igu_vec = 0;

	bp->msix_table[0].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);

#ifdef BCM_CNIC
	igu_vec = BP_L_ID(bp) + offset;
	bp->msix_table[1].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
	offset++;
#endif
	for_each_queue(bp, i) {
		igu_vec = BP_L_ID(bp) + offset + i;
		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     BNX2X_NUM_QUEUES(bp) + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

#ifdef BCM_CNIC
	offset++;
#endif
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 bp->dev->name, i);

		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		fp->state = BNX2X_FP_STATE_IRQ;
	}

	i = BNX2X_NUM_QUEUES(bp);
	netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
		    bp->msix_table[0].vector,
		    0, bp->msix_table[offset].vector,
		    i - 1, bp->msix_table[offset + i - 1].vector);

	return 0;
}

static int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		flags = 0;
	else
		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}

static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_netif_start(struct bnx2x *bp)
{
	int intr_sem;

	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
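
	/* atomic_dec_and_test() returns true only when intr_sem drops to
	 * zero, i.e. when this enable balances the last outstanding
	 * interrupt-disable; only then may NAPI and HW interrupts be
	 * re-armed */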
	if (intr_sem) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}

static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
}

/*
 * Init service functions
 */

/**
 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
 *
 * @param bp		driver descriptor
 * @param set		set or clear an entry (1 or 0)
 * @param mac		pointer to a buffer containing a MAC
 * @param cl_bit_vec	bit vector of clients to register a MAC for
 * @param cam_offset	offset in a CAM to use
 * @param with_bcast	set broadcast MAC as well
 */
static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
				      u32 cl_bit_vec, u8 cam_offset,
				      u8 with_bcast)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length = 1 + (with_bcast ? 1 : 0);
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
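	/* e.g. for a (hypothetical) MAC 00:11:22:33:44:55 on a little-endian
	 * host, *(u16 *)&mac[0] reads 0x1100 and swab16() yields 0x0011, so
	 * the three CAM fields become 0x0011, 0x2233 and 0x4455 */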
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.clients_bit_vector =
						cpu_to_le32(cl_bit_vec);
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	if (with_bcast) {
		config->config_table[1].cam_entry.msb_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.middle_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.lsb_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.flags = cpu_to_le16(port);
		if (set)
			config->config_table[1].target_table_entry.flags =
					TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
		else
			CAM_INVALIDATE(config->config_table[1]);
		config->config_table[1].target_table_entry.clients_bit_vector =
						cpu_to_le32(cl_bit_vec);
		config->config_table[1].target_table_entry.vlan_id = 0;
	}

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

/**
 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
 *
 * @param bp		driver descriptor
 * @param set		set or clear an entry (1 or 0)
 * @param mac		pointer to a buffer containing a MAC
 * @param cl_bit_vec	bit vector of clients to register a MAC for
 * @param cam_offset	offset in a CAM to use
 */
static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
				       u32 cl_bit_vec, u8 cam_offset)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	config->hdr.length = 1;
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(cl_bit_vec);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
{
	bp->set_mac_pending++;
	smp_wmb();

	bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
				   (1 << bp->fp->cl_id), BP_FUNC(bp));

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
}

static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
{
	bp->set_mac_pending++;
	smp_wmb();

	bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
				  (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
				  1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
}

#ifdef BCM_CNIC
/**
 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion
 * returns.
 *
 * @param bp	driver handle
 * @param set	set or clear the CAM entry
 *
 * @return 0 if success, -ENODEV if the ramrod doesn't return.
 */
static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
	u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);

	bp->set_mac_pending++;
	smp_wmb();

	/* Send a SET_MAC ramrod */
	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
					  cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
					  1);
	else
		/* CAM allocation for E1H
		 * unicasts: by func number
		 * multicast: 20+FUNC*20, 20 each
		 */
		bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
					   cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));

	/* Wait for a completion when setting */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);

	return 0;
}
#endif

static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}

static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
{

	switch (bp->multi_mode) {
	case ETH_RSS_MODE_DISABLED:
		bp->num_queues = 1;
		break;

	case ETH_RSS_MODE_REGULAR:
		if (num_queues)
			bp->num_queues = min_t(u32, num_queues,
					       BNX2X_MAX_QUEUES(bp));
		else
			bp->num_queues = min_t(u32, num_online_cpus(),
					       BNX2X_MAX_QUEUES(bp));
		break;

	default:
		bp->num_queues = 1;
		break;
	}
}

static int bnx2x_set_num_queues(struct bnx2x *bp)
{
	int rc = 0;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		bp->num_queues = 1;
		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
		break;

	case INT_MODE_MSIX:
	default:
		/* Set number of queues according to bp->multi_mode value */
		bnx2x_set_num_queues_msix(bp);

		DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
		   bp->num_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fall back to MSI or legacy INTx with one fp
		 */
		rc = bnx2x_enable_msix(bp);
		if (rc)
			/* failed to enable MSI-X */
			bp->num_queues = 1;
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_queues;
	return rc;
}

#ifdef BCM_CNIC
static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
#endif

/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	rc = bnx2x_set_num_queues(bp);

	if (bnx2x_alloc_mem(bp)) {
		bnx2x_free_irq(bp, true);
		return -ENOMEM;
	}

	for_each_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			bnx2x_free_irq(bp, true);
			goto load_error1;
		}
	} else {
		/* Fall back to INTx if we failed to enable MSI-X due to
		   lack of memory (in bnx2x_set_num_queues()) */
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			bnx2x_free_irq(bp, true);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			netdev_info(bp->dev, "using MSI IRQ %d\n",
				    bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to the MCP.
	   The response gives the type of LOAD command: if this is the first
	   port to be initialized, the common blocks should be initialized as
	   well, otherwise not.
	 */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);
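
		/* With no MCP the driver arbitrates by itself:
		 * load_count[0] counts functions loaded on the chip and
		 * load_count[1 + port] those on this port, so the first
		 * loader performs COMMON init, the first on a port PORT
		 * init, and all others FUNCTION init only */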

		DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
	    (bp->common.shmem2_base))
		SHMEM2_WR(bp, dcc_support,
			  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
			   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
#ifndef BNX2X_STOP_ON_ERROR
		goto load_error3;
#else
		bp->panic = 1;
		return -EBUSY;
#endif
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;
		}

	if (bp->state == BNX2X_STATE_OPEN) {
#ifdef BCM_CNIC
		/* Enable Timer scan */
		REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
#endif
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
#ifdef BCM_CNIC
				goto load_error4;
#else
				goto load_error3;
#endif
		}

		if (CHIP_IS_E1(bp))
			bnx2x_set_eth_mac_addr_e1(bp, 1);
		else
			bnx2x_set_eth_mac_addr_e1h(bp, 1);
#ifdef BCM_CNIC
		/* Set iSCSI L2 MAC */
		mutex_lock(&bp->cnic_mutex);
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
			bnx2x_set_iscsi_eth_mac_addr(bp, 1);
			bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
			bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
				      CNIC_SB_ID(bp));
		}
		mutex_unlock(&bp->cnic_mutex);
#endif
	}

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		if (bp->state == BNX2X_STATE_OPEN) {
			/* Tx queues should only be re-enabled */
			netif_tx_wake_all_queues(bp->dev);
		}
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		if (bp->state != BNX2X_STATE_OPEN)
			netif_tx_disable(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

#ifdef BCM_CNIC
	bnx2x_setup_cnic_irq_info(bp);
	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif
	bnx2x_inc_load_cnt(bp);

	return 0;

#ifdef BCM_CNIC
load_error4:
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
#endif
load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp, false);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}

static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}

static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on the default status block;
	   we are going to reset the chip anyway,
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}
static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
	/*
	 * Wait for at least 10ms and up to 2 seconds for the timers scan to
	 * complete
	 */
	for (i = 0; i < 200; i++) {
		msleep(10);
		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
			break;
	}
#endif
	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}

static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	/* Wait until tx fastpath tasks complete */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_eth_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		bp->set_mac_pending++;
		smp_wmb();

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_eth_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);

		REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
	}
#ifdef BCM_CNIC
	/* Clear iSCSI L2 MAC */
	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
	}
	mutex_unlock(&bp->cnic_mutex);
#endif

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;

	else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
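
		/* e.g. a (hypothetical) MAC 00:11:22:33:44:55 is written as
		 * 0x00000011 (the two high bytes) followed by 0x22334455
		 * (the remaining four) into the selected match entry */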

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections;
	   completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

}
8395
8396static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp)
8397{
8398 u32 val;
8399
8400 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
8401
8402 if (CHIP_IS_E1(bp)) {
8403 int port = BP_PORT(bp);
8404 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8405 MISC_REG_AEU_MASK_ATTN_FUNC_0;
8406
8407 val = REG_RD(bp, addr);
8408 val &= ~(0x300);
8409 REG_WR(bp, addr, val);
8410 } else if (CHIP_IS_E1H(bp)) {
8411 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
8412 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
8413 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
8414 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
8415 }
8416}
8417
8418/* must be called with rtnl_lock */
8419static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
8420{
8421 int i;
8422
8423 if (bp->state == BNX2X_STATE_CLOSED) {
8424 /* Interface has been removed - nothing to recover */
8425 bp->recovery_state = BNX2X_RECOVERY_DONE;
8426 bp->is_leader = 0;
8427 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8428 smp_wmb();
8429
8430 return -EINVAL;
8431 }
8432
8433#ifdef BCM_CNIC
8434 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
8435#endif
8436 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
8437
8438 /* Set "drop all" */
8439 bp->rx_mode = BNX2X_RX_MODE_NONE;
8440 bnx2x_set_storm_rx_mode(bp);
8441
8442 /* Disable HW interrupts, NAPI and Tx */
8443 bnx2x_netif_stop(bp, 1);
8444
8445 del_timer_sync(&bp->timer);
8446 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
8447 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
8448 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8449
8450 /* Release IRQs */
8451 bnx2x_free_irq(bp, false);
8452
8453 /* Cleanup the chip if needed */
8454 if (unload_mode != UNLOAD_RECOVERY)
8455 bnx2x_chip_cleanup(bp, unload_mode);
8456
9a035440 8457 bp->port.pmf = 0;
a2fbb9ea 8458
7a9b2557 8459 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 8460 bnx2x_free_skbs(bp);
54b9ddaa 8461 for_each_queue(bp, i)
3196a88a 8462 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
54b9ddaa 8463 for_each_queue(bp, i)
7cde1c8b 8464 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
8465 bnx2x_free_mem(bp);
8466
8467 bp->state = BNX2X_STATE_CLOSED;
228241eb 8468
a2fbb9ea
ET
8469 netif_carrier_off(bp->dev);
8470
72fd0718
VZ
8471 /* The last driver must disable the "close the gates" mechanism if there
8472 * is no parity attention or "process kill" pending.
8473 */
8474 if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
8475 bnx2x_reset_is_done(bp))
8476 bnx2x_disable_close_the_gate(bp);
8477
8478 /* Reset the MCP mailbox sequence if there is an ongoing recovery */
8479 if (unload_mode == UNLOAD_RECOVERY)
8480 bp->fw_seq = 0;
8481
8482 return 0;
8483}
8484
8485/* Close gates #2, #3 and #4: */
8486static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
8487{
8488 u32 val, addr;
8489
8490 /* Gates #2 and #4a are closed/opened for "not E1" only */
8491 if (!CHIP_IS_E1(bp)) {
8492 /* #4 */
8493 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
8494 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
8495 close ? (val | 0x1) : (val & (~(u32)1)));
8496 /* #2 */
8497 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
8498 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
8499 close ? (val | 0x1) : (val & (~(u32)1)));
8500 }
8501
8502 /* #3 */
8503 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
8504 val = REG_RD(bp, addr);
8505 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
8506
8507 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
8508 close ? "closing" : "opening");
8509 mmiowb();
8510}
8511
8512#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
8513
8514static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
8515{
8516 /* Do some magic... */
8517 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8518 *magic_val = val & SHARED_MF_CLP_MAGIC;
8519 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
8520}
8521
8522/* Restore the value of the `magic' bit.
8523 *
8524 * @param bp Driver handle.
8525 * @param magic_val Old value of the `magic' bit.
8526 */
8527static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
8528{
8529 /* Restore the `magic' bit value... */
8530 /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
8531 SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
8532 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
8533 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8534 MF_CFG_WR(bp, shared_mf_config.clp_mb,
8535 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
8536}
8537
8538/* Prepares for MCP reset: takes care of CLP configurations.
8539 *
8540 * @param bp Driver handle.
8541 * @param magic_val Old value of 'magic' bit.
8542 */
8543static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
8544{
8545 u32 shmem;
8546 u32 validity_offset;
8547
8548 DP(NETIF_MSG_HW, "Starting\n");
8549
8550 /* Set `magic' bit in order to save MF config */
8551 if (!CHIP_IS_E1(bp))
8552 bnx2x_clp_reset_prep(bp, magic_val);
8553
8554 /* Get shmem offset */
8555 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8556 validity_offset = offsetof(struct shmem_region, validity_map[0]);
8557
8558 /* Clear validity map flags */
8559 if (shmem > 0)
8560 REG_WR(bp, shmem + validity_offset, 0);
8561}
8562
8563#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
8564#define MCP_ONE_TIMEOUT 100 /* 100 ms */
8565
8566/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
8567 * depending on the HW type.
8568 *
8569 * @param bp Driver handle.
8570 */
8571static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
8572{
8573 /* special handling for emulation and FPGA,
8574 wait 10 times longer */
8575 if (CHIP_REV_IS_SLOW(bp))
8576 msleep(MCP_ONE_TIMEOUT*10);
8577 else
8578 msleep(MCP_ONE_TIMEOUT);
8579}
8580
8581static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
8582{
8583 u32 shmem, cnt, validity_offset, val;
8584 int rc = 0;
8585
8586 msleep(100);
8587
8588 /* Get shmem offset */
8589 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8590 if (shmem == 0) {
8591 BNX2X_ERR("Shmem 0 return failure\n");
8592 rc = -ENOTTY;
8593 goto exit_lbl;
8594 }
8595
8596 validity_offset = offsetof(struct shmem_region, validity_map[0]);
8597
8598 /* Wait for MCP to come up */
8599 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
8600 /* TBD: it's best to check the validity map of the last port;
8601 * currently checks on port 0.
8602 */
8603 val = REG_RD(bp, shmem + validity_offset);
8604 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
8605 shmem + validity_offset, val);
8606
8607 /* check that shared memory is valid. */
8608 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8609 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8610 break;
8611
8612 bnx2x_mcp_wait_one(bp);
8613 }
8614
8615 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
8616
8617 /* Check that shared memory is valid. This indicates that MCP is up. */
8618 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
8619 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
8620 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
8621 rc = -ENOTTY;
8622 goto exit_lbl;
8623 }
8624
8625exit_lbl:
8626 /* Restore the `magic' bit value */
8627 if (!CHIP_IS_E1(bp))
8628 bnx2x_clp_reset_done(bp, magic_val);
8629
8630 return rc;
8631}
8632
8633static void bnx2x_pxp_prep(struct bnx2x *bp)
8634{
8635 if (!CHIP_IS_E1(bp)) {
8636 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
8637 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
8638 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
8639 mmiowb();
8640 }
8641}
8642
8643/*
8644 * Reset the whole chip except for:
8645 * - PCIE core
8646 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
8647 * one reset bit)
8648 * - IGU
8649 * - MISC (including AEU)
8650 * - GRC
8651 * - RBCN, RBCP
8652 */
8653static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
8654{
8655 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
8656
8657 not_reset_mask1 =
8658 MISC_REGISTERS_RESET_REG_1_RST_HC |
8659 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
8660 MISC_REGISTERS_RESET_REG_1_RST_PXP;
8661
8662 not_reset_mask2 =
8663 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
8664 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
8665 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
8666 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
8667 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
8668 MISC_REGISTERS_RESET_REG_2_RST_GRC |
8669 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
8670 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
8671
8672 reset_mask1 = 0xffffffff;
8673
8674 if (CHIP_IS_E1(bp))
8675 reset_mask2 = 0xffff;
8676 else
8677 reset_mask2 = 0x1ffff;
8678
8679 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8680 reset_mask1 & (~not_reset_mask1));
8681 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8682 reset_mask2 & (~not_reset_mask2));
8683
8684 barrier();
8685 mmiowb();
8686
8687 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
8688 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
8689 mmiowb();
8690}
8691
8692static int bnx2x_process_kill(struct bnx2x *bp)
8693{
8694 int cnt = 1000;
8695 u32 val = 0;
8696 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
8697
8698
8699 /* Empty the Tetris buffer, wait for 1s */
8700 do {
8701 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
8702 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
8703 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
8704 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
8705 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
8706 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
8707 ((port_is_idle_0 & 0x1) == 0x1) &&
8708 ((port_is_idle_1 & 0x1) == 0x1) &&
8709 (pgl_exp_rom2 == 0xffffffff))
8710 break;
8711 msleep(1);
8712 } while (cnt-- > 0);
8713
8714 if (cnt <= 0) {
8715 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
8716 " are still"
8717 " outstanding read requests after 1s!\n");
8718 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
8719 " port_is_idle_0=0x%08x,"
8720 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
8721 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
8722 pgl_exp_rom2);
8723 return -EAGAIN;
8724 }
8725
8726 barrier();
8727
8728 /* Close gates #2, #3 and #4 */
8729 bnx2x_set_234_gates(bp, true);
8730
8731 /* TBD: Indicate that "process kill" is in progress to MCP */
8732
8733 /* Clear "unprepared" bit */
8734 REG_WR(bp, MISC_REG_UNPREPARED, 0);
8735 barrier();
8736
8737 /* Make sure all is written to the chip before the reset */
8738 mmiowb();
8739
8740 /* Wait for 1ms to empty GLUE and PCI-E core queues,
8741 * PSWHST, GRC and PSWRD Tetris buffer.
8742 */
8743 msleep(1);
8744
8745 /* Prepare for chip reset: */
8746 /* MCP */
8747 bnx2x_reset_mcp_prep(bp, &val);
8748
8749 /* PXP */
8750 bnx2x_pxp_prep(bp);
8751 barrier();
8752
8753 /* reset the chip */
8754 bnx2x_process_kill_chip_reset(bp);
8755 barrier();
8756
8757 /* Recover after reset: */
8758 /* MCP */
8759 if (bnx2x_reset_mcp_comp(bp, val))
8760 return -EAGAIN;
8761
8762 /* PXP */
8763 bnx2x_pxp_prep(bp);
8764
8765 /* Open the gates #2, #3 and #4 */
8766 bnx2x_set_234_gates(bp, false);
8767
8768 /* TBD: IGU/AEU preparation: bring the AEU/IGU back to a
8769 * reset state, re-enable attentions. */
8770
a2fbb9ea
ET
8771 return 0;
8772}
8773
72fd0718
VZ
8774static int bnx2x_leader_reset(struct bnx2x *bp)
8775{
8776 int rc = 0;
8777 /* Try to recover after the failure */
8778 if (bnx2x_process_kill(bp)) {
8779 printk(KERN_ERR "%s: Something bad has happened! Aii!\n",
8780 bp->dev->name);
8781 rc = -EAGAIN;
8782 goto exit_leader_reset;
8783 }
8784
8785 /* Clear "reset is in progress" bit and update the driver state */
8786 bnx2x_set_reset_done(bp);
8787 bp->recovery_state = BNX2X_RECOVERY_DONE;
8788
8789exit_leader_reset:
8790 bp->is_leader = 0;
8791 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8792 smp_wmb();
8793 return rc;
8794}
8795
8796static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
8797
8798/* Assumption: runs under rtnl lock. This together with the fact
8799 * that it's called only from bnx2x_reset_task() ensures that it
8800 * will never be called when netif_running(bp->dev) is false.
8801 */
8802static void bnx2x_parity_recover(struct bnx2x *bp)
8803{
8804 DP(NETIF_MSG_HW, "Handling parity\n");
8805 while (1) {
8806 switch (bp->recovery_state) {
8807 case BNX2X_RECOVERY_INIT:
8808 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
8809 /* Try to get a LEADER_LOCK HW lock */
8810 if (bnx2x_trylock_hw_lock(bp,
8811 HW_LOCK_RESOURCE_RESERVED_08))
8812 bp->is_leader = 1;
8813
8814 /* Stop the driver */
8815 /* If interface has been removed - return */
8816 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
8817 return;
8818
8819 bp->recovery_state = BNX2X_RECOVERY_WAIT;
8820 /* Ensure "is_leader" and "recovery_state"
8821 * update values are seen on other CPUs
8822 */
8823 smp_wmb();
8824 break;
8825
8826 case BNX2X_RECOVERY_WAIT:
8827 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
8828 if (bp->is_leader) {
8829 u32 load_counter = bnx2x_get_load_cnt(bp);
8830 if (load_counter) {
8831 /* Wait until all other functions get
8832 * down.
8833 */
8834 schedule_delayed_work(&bp->reset_task,
8835 HZ/10);
8836 return;
8837 } else {
8838 /* If all other functions got down -
8839 * try to bring the chip back to
8840 * normal. In any case it's an exit
8841 * point for a leader.
8842 */
8843 if (bnx2x_leader_reset(bp) ||
8844 bnx2x_nic_load(bp, LOAD_NORMAL)) {
8845 printk(KERN_ERR"%s: Recovery "
8846 "has failed. Power cycle is "
8847 "needed.\n", bp->dev->name);
8848 /* Disconnect this device */
8849 netif_device_detach(bp->dev);
8850 /* Block ifup for all functions
8851 * of this ASIC until
8852 * "process kill" or power
8853 * cycle.
8854 */
8855 bnx2x_set_reset_in_progress(bp);
8856 /* Shut down the power */
8857 bnx2x_set_power_state(bp,
8858 PCI_D3hot);
8859 return;
8860 }
8861
8862 return;
8863 }
8864 } else { /* non-leader */
8865 if (!bnx2x_reset_is_done(bp)) {
8866 /* Try to get a LEADER_LOCK HW lock,
8867 * since a former leader may have
8868 * been unloaded by the user or
8869 * released leadership for
8870 * another reason.
8871 */
8872 if (bnx2x_trylock_hw_lock(bp,
8873 HW_LOCK_RESOURCE_RESERVED_08)) {
8874 /* I'm a leader now! Restart a
8875 * switch case.
8876 */
8877 bp->is_leader = 1;
8878 break;
8879 }
8880
8881 schedule_delayed_work(&bp->reset_task,
8882 HZ/10);
8883 return;
8884
8885 } else { /* A leader has completed
8886 * the "process kill". It's an exit
8887 * point for a non-leader.
8888 */
8889 bnx2x_nic_load(bp, LOAD_NORMAL);
8890 bp->recovery_state =
8891 BNX2X_RECOVERY_DONE;
8892 smp_wmb();
8893 return;
8894 }
8895 }
8896 default:
8897 return;
8898 }
8899 }
8900}
8901
8902 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
8903 * scheduled on a general queue in order to prevent a deadlock.
8904 */
34f80b04
EG
8905static void bnx2x_reset_task(struct work_struct *work)
8906{
72fd0718 8907 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
34f80b04
EG
8908
8909#ifdef BNX2X_STOP_ON_ERROR
8910 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8911 " so reset not done to allow debug dump,\n"
72fd0718 8912 KERN_ERR " you will need to reboot when done\n");
34f80b04
EG
8913 return;
8914#endif
8915
8916 rtnl_lock();
8917
8918 if (!netif_running(bp->dev))
8919 goto reset_task_exit;
8920
72fd0718
VZ
8921 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
8922 bnx2x_parity_recover(bp);
8923 else {
8924 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8925 bnx2x_nic_load(bp, LOAD_NORMAL);
8926 }
34f80b04
EG
8927
8928reset_task_exit:
8929 rtnl_unlock();
8930}
8931
a2fbb9ea
ET
8932/* end of nic load/unload */
8933
8934/* ethtool_ops */
8935
8936/*
8937 * Init service functions
8938 */
8939
f1ef27ef
EG
8940static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8941{
8942 switch (func) {
8943 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8944 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
8945 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
8946 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
8947 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
8948 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
8949 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
8950 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
8951 default:
8952 BNX2X_ERR("Unsupported function index: %d\n", func);
8953 return (u32)(-1);
8954 }
8955}
8956
8957static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
8958{
8959 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
8960
8961 /* Flush all outstanding writes */
8962 mmiowb();
8963
8964 /* Pretend to be function 0 */
8965 REG_WR(bp, reg, 0);
8966 /* Flush the GRC transaction (in the chip) */
8967 new_val = REG_RD(bp, reg);
8968 if (new_val != 0) {
8969 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
8970 new_val);
8971 BUG();
8972 }
8973
8974 /* From now on we are in the "like-E1" mode */
8975 bnx2x_int_disable(bp);
8976
8977 /* Flush all outstanding writes */
8978 mmiowb();
8979
8980 /* Restore the original function settings */
8981 REG_WR(bp, reg, orig_func);
8982 new_val = REG_RD(bp, reg);
8983 if (new_val != orig_func) {
8984 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
8985 orig_func, new_val);
8986 BUG();
8987 }
8988}
8989
8990static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
8991{
8992 if (CHIP_IS_E1H(bp))
8993 bnx2x_undi_int_disable_e1h(bp, func);
8994 else
8995 bnx2x_int_disable(bp);
8996}
8997
34f80b04
EG
8998static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
8999{
9000 u32 val;
9001
9002 /* Check if there is any driver already loaded */
9003 val = REG_RD(bp, MISC_REG_UNPREPARED);
9004 if (val == 0x1) {
9005 /* Check if it is the UNDI driver:
9006 * the UNDI driver initializes the CID offset for the normal doorbell to 0x7
9007 */
4a37fb66 9008 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
9009 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
9010 if (val == 0x7) {
9011 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 9012 /* save our func */
34f80b04 9013 int func = BP_FUNC(bp);
da5a662a
VZ
9014 u32 swap_en;
9015 u32 swap_val;
34f80b04 9016
b4661739
EG
9017 /* clear the UNDI indication */
9018 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
9019
34f80b04
EG
9020 BNX2X_DEV_INFO("UNDI is active! reset device\n");
9021
9022 /* try to unload UNDI on port 0 */
9023 bp->func = 0;
da5a662a
VZ
9024 bp->fw_seq =
9025 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9026 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 9027 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
9028
9029 /* if UNDI is loaded on the other port */
9030 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
9031
da5a662a
VZ
9032 /* send "DONE" for previous unload */
9033 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9034
9035 /* unload UNDI on port 1 */
34f80b04 9036 bp->func = 1;
da5a662a
VZ
9037 bp->fw_seq =
9038 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9039 DRV_MSG_SEQ_NUMBER_MASK);
9040 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9041
9042 bnx2x_fw_command(bp, reset_code);
34f80b04
EG
9043 }
9044
b4661739
EG
9045 /* now it's safe to release the lock */
9046 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9047
f1ef27ef 9048 bnx2x_undi_int_disable(bp, func);
da5a662a
VZ
9049
9050 /* close input traffic and wait for it */
9051 /* Do not rcv packets to BRB */
9052 REG_WR(bp,
9053 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
9054 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
9055 /* Do not direct rcv packets that are not for MCP to
9056 * the BRB */
9057 REG_WR(bp,
9058 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
9059 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
9060 /* clear AEU */
9061 REG_WR(bp,
9062 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
9063 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
9064 msleep(10);
9065
9066 /* save NIG port swap info */
9067 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
9068 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
9069 /* reset device */
9070 REG_WR(bp,
9071 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 9072 0xd3ffffff);
34f80b04
EG
9073 REG_WR(bp,
9074 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
9075 0x1403);
da5a662a
VZ
9076 /* take the NIG out of reset and restore swap values */
9077 REG_WR(bp,
9078 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
9079 MISC_REGISTERS_RESET_REG_1_RST_NIG);
9080 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
9081 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
9082
9083 /* send unload done to the MCP */
9084 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9085
9086 /* restore our func and fw_seq */
9087 bp->func = func;
9088 bp->fw_seq =
9089 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9090 DRV_MSG_SEQ_NUMBER_MASK);
b4661739
EG
9091
9092 } else
9093 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
9094 }
9095}
9096
9097static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
9098{
9099 u32 val, val2, val3, val4, id;
72ce58c3 9100 u16 pmc;
34f80b04
EG
9101
9102 /* Get the chip revision id and number. */
9103 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
9104 val = REG_RD(bp, MISC_REG_CHIP_NUM);
9105 id = ((val & 0xffff) << 16);
9106 val = REG_RD(bp, MISC_REG_CHIP_REV);
9107 id |= ((val & 0xf) << 12);
9108 val = REG_RD(bp, MISC_REG_CHIP_METAL);
9109 id |= ((val & 0xff) << 4);
5a40e08e 9110 val = REG_RD(bp, MISC_REG_BOND_ID);
34f80b04
EG
9111 id |= (val & 0xf);
9112 bp->common.chip_id = id;
9113 bp->link_params.chip_id = bp->common.chip_id;
9114 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
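The id assembly above follows the field layout in the comment (chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3). A standalone sketch of that packing, with made-up field values:

#include <stdint.h>
#include <stdio.h>

/* Editor's sketch, not part of the driver: pack the chip id fields
 * exactly as laid out in the comment above. */
static uint32_t make_chip_id(uint16_t num, uint8_t rev, uint8_t metal,
			     uint8_t bond)
{
	return ((uint32_t)num << 16) |
	       ((uint32_t)(rev & 0xf) << 12) |
	       ((uint32_t)metal << 4) |
	       (bond & 0xf);
}

int main(void)
{
	/* made-up values: num=0x164e, rev=1, metal=0, bond=0 */
	printf("chip id 0x%08x\n", make_chip_id(0x164e, 1, 0, 0));
	/* prints 0x164e1000 */
	return 0;
}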
9115
1c06328c
EG
9116 val = (REG_RD(bp, 0x2874) & 0x55);
9117 if ((bp->common.chip_id & 0x1) ||
9118 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
9119 bp->flags |= ONE_PORT_FLAG;
9120 BNX2X_DEV_INFO("single port device\n");
9121 }
9122
34f80b04
EG
9123 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
9124 bp->common.flash_size = (NVRAM_1MB_SIZE <<
9125 (val & MCPR_NVM_CFG4_FLASH_SIZE));
9126 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
9127 bp->common.flash_size, bp->common.flash_size);
9128
9129 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
2691d51d 9130 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
34f80b04 9131 bp->link_params.shmem_base = bp->common.shmem_base;
2691d51d
EG
9132 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
9133 bp->common.shmem_base, bp->common.shmem2_base);
34f80b04
EG
9134
9135 if (!bp->common.shmem_base ||
9136 (bp->common.shmem_base < 0xA0000) ||
9137 (bp->common.shmem_base >= 0xC0000)) {
9138 BNX2X_DEV_INFO("MCP not active\n");
9139 bp->flags |= NO_MCP_FLAG;
9140 return;
9141 }
9142
9143 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9144 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9145 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9146 BNX2X_ERR("BAD MCP validity signature\n");
9147
9148 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 9149 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
34f80b04
EG
9150
9151 bp->link_params.hw_led_mode = ((bp->common.hw_config &
9152 SHARED_HW_CFG_LED_MODE_MASK) >>
9153 SHARED_HW_CFG_LED_MODE_SHIFT);
9154
c2c8b03e
EG
9155 bp->link_params.feature_config_flags = 0;
9156 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
9157 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
9158 bp->link_params.feature_config_flags |=
9159 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9160 else
9161 bp->link_params.feature_config_flags &=
9162 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9163
34f80b04
EG
9164 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
9165 bp->common.bc_ver = val;
9166 BNX2X_DEV_INFO("bc_ver %X\n", val);
9167 if (val < BNX2X_BC_VER) {
9168 /* for now only warn;
9169 * later we might need to enforce this */
9170 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
9171 " please upgrade BC\n", BNX2X_BC_VER, val);
9172 }
4d295db0
EG
9173 bp->link_params.feature_config_flags |=
9174 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
9175 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
72ce58c3
EG
9176
9177 if (BP_E1HVN(bp) == 0) {
9178 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
9179 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
9180 } else {
9181 /* no WOL capability for E1HVN != 0 */
9182 bp->flags |= NO_WOL_FLAG;
9183 }
9184 BNX2X_DEV_INFO("%sWoL capable\n",
f5372251 9185 (bp->flags & NO_WOL_FLAG) ? "not " : "");
34f80b04
EG
9186
9187 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
9188 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
9189 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
9190 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
9191
7995c64e 9192 pr_info("part number %X-%X-%X-%X\n", val, val2, val3, val4);
34f80b04
EG
9193}
9194
9195static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
9196 u32 switch_cfg)
a2fbb9ea 9197{
34f80b04 9198 int port = BP_PORT(bp);
a2fbb9ea
ET
9199 u32 ext_phy_type;
9200
a2fbb9ea
ET
9201 switch (switch_cfg) {
9202 case SWITCH_CFG_1G:
9203 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
9204
c18487ee
YR
9205 ext_phy_type =
9206 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
9207 switch (ext_phy_type) {
9208 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
9209 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
9210 ext_phy_type);
9211
34f80b04
EG
9212 bp->port.supported |= (SUPPORTED_10baseT_Half |
9213 SUPPORTED_10baseT_Full |
9214 SUPPORTED_100baseT_Half |
9215 SUPPORTED_100baseT_Full |
9216 SUPPORTED_1000baseT_Full |
9217 SUPPORTED_2500baseX_Full |
9218 SUPPORTED_TP |
9219 SUPPORTED_FIBRE |
9220 SUPPORTED_Autoneg |
9221 SUPPORTED_Pause |
9222 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
9223 break;
9224
9225 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
9226 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
9227 ext_phy_type);
9228
34f80b04
EG
9229 bp->port.supported |= (SUPPORTED_10baseT_Half |
9230 SUPPORTED_10baseT_Full |
9231 SUPPORTED_100baseT_Half |
9232 SUPPORTED_100baseT_Full |
9233 SUPPORTED_1000baseT_Full |
9234 SUPPORTED_TP |
9235 SUPPORTED_FIBRE |
9236 SUPPORTED_Autoneg |
9237 SUPPORTED_Pause |
9238 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
9239 break;
9240
9241 default:
9242 BNX2X_ERR("NVRAM config error. "
9243 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 9244 bp->link_params.ext_phy_config);
a2fbb9ea
ET
9245 return;
9246 }
9247
34f80b04
EG
9248 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
9249 port*0x10);
9250 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
9251 break;
9252
9253 case SWITCH_CFG_10G:
9254 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
9255
c18487ee
YR
9256 ext_phy_type =
9257 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
9258 switch (ext_phy_type) {
9259 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
9260 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
9261 ext_phy_type);
9262
34f80b04
EG
9263 bp->port.supported |= (SUPPORTED_10baseT_Half |
9264 SUPPORTED_10baseT_Full |
9265 SUPPORTED_100baseT_Half |
9266 SUPPORTED_100baseT_Full |
9267 SUPPORTED_1000baseT_Full |
9268 SUPPORTED_2500baseX_Full |
9269 SUPPORTED_10000baseT_Full |
9270 SUPPORTED_TP |
9271 SUPPORTED_FIBRE |
9272 SUPPORTED_Autoneg |
9273 SUPPORTED_Pause |
9274 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
9275 break;
9276
589abe3a
EG
9277 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
9278 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
34f80b04 9279 ext_phy_type);
f1410647 9280
34f80b04 9281 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 9282 SUPPORTED_1000baseT_Full |
34f80b04 9283 SUPPORTED_FIBRE |
589abe3a 9284 SUPPORTED_Autoneg |
34f80b04
EG
9285 SUPPORTED_Pause |
9286 SUPPORTED_Asym_Pause);
f1410647
ET
9287 break;
9288
589abe3a
EG
9289 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9290 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
f1410647
ET
9291 ext_phy_type);
9292
34f80b04 9293 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 9294 SUPPORTED_2500baseX_Full |
34f80b04 9295 SUPPORTED_1000baseT_Full |
589abe3a
EG
9296 SUPPORTED_FIBRE |
9297 SUPPORTED_Autoneg |
9298 SUPPORTED_Pause |
9299 SUPPORTED_Asym_Pause);
9300 break;
9301
9302 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9303 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
9304 ext_phy_type);
9305
9306 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04
EG
9307 SUPPORTED_FIBRE |
9308 SUPPORTED_Pause |
9309 SUPPORTED_Asym_Pause);
f1410647
ET
9310 break;
9311
589abe3a
EG
9312 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9313 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
a2fbb9ea
ET
9314 ext_phy_type);
9315
34f80b04
EG
9316 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9317 SUPPORTED_1000baseT_Full |
9318 SUPPORTED_FIBRE |
34f80b04
EG
9319 SUPPORTED_Pause |
9320 SUPPORTED_Asym_Pause);
f1410647
ET
9321 break;
9322
589abe3a
EG
9323 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
9324 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
c18487ee
YR
9325 ext_phy_type);
9326
34f80b04 9327 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04 9328 SUPPORTED_1000baseT_Full |
34f80b04 9329 SUPPORTED_Autoneg |
589abe3a 9330 SUPPORTED_FIBRE |
34f80b04
EG
9331 SUPPORTED_Pause |
9332 SUPPORTED_Asym_Pause);
c18487ee
YR
9333 break;
9334
4d295db0
EG
9335 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
9336 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
9337 ext_phy_type);
9338
9339 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9340 SUPPORTED_1000baseT_Full |
9341 SUPPORTED_Autoneg |
9342 SUPPORTED_FIBRE |
9343 SUPPORTED_Pause |
9344 SUPPORTED_Asym_Pause);
9345 break;
9346
f1410647
ET
9347 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
9348 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
9349 ext_phy_type);
9350
34f80b04
EG
9351 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9352 SUPPORTED_TP |
9353 SUPPORTED_Autoneg |
9354 SUPPORTED_Pause |
9355 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
9356 break;
9357
28577185
EG
9358 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
9359 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
9360 ext_phy_type);
9361
9362 bp->port.supported |= (SUPPORTED_10baseT_Half |
9363 SUPPORTED_10baseT_Full |
9364 SUPPORTED_100baseT_Half |
9365 SUPPORTED_100baseT_Full |
9366 SUPPORTED_1000baseT_Full |
9367 SUPPORTED_10000baseT_Full |
9368 SUPPORTED_TP |
9369 SUPPORTED_Autoneg |
9370 SUPPORTED_Pause |
9371 SUPPORTED_Asym_Pause);
9372 break;
9373
c18487ee
YR
9374 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9375 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9376 bp->link_params.ext_phy_config);
9377 break;
9378
a2fbb9ea
ET
9379 default:
9380 BNX2X_ERR("NVRAM config error. "
9381 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 9382 bp->link_params.ext_phy_config);
a2fbb9ea
ET
9383 return;
9384 }
9385
34f80b04
EG
9386 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
9387 port*0x18);
9388 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 9389
a2fbb9ea
ET
9390 break;
9391
9392 default:
9393 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 9394 bp->port.link_config);
a2fbb9ea
ET
9395 return;
9396 }
34f80b04 9397 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea
ET
9398
9399 /* mask what we support according to speed_cap_mask */
c18487ee
YR
9400 if (!(bp->link_params.speed_cap_mask &
9401 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 9402 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 9403
c18487ee
YR
9404 if (!(bp->link_params.speed_cap_mask &
9405 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 9406 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 9407
c18487ee
YR
9408 if (!(bp->link_params.speed_cap_mask &
9409 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 9410 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 9411
c18487ee
YR
9412 if (!(bp->link_params.speed_cap_mask &
9413 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 9414 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 9415
c18487ee
YR
9416 if (!(bp->link_params.speed_cap_mask &
9417 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
9418 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
9419 SUPPORTED_1000baseT_Full);
a2fbb9ea 9420
c18487ee
YR
9421 if (!(bp->link_params.speed_cap_mask &
9422 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 9423 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 9424
c18487ee
YR
9425 if (!(bp->link_params.speed_cap_mask &
9426 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 9427 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 9428
34f80b04 9429 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
9430}
9431
34f80b04 9432static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 9433{
c18487ee 9434 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 9435
34f80b04 9436 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 9437 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 9438 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 9439 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 9440 bp->port.advertising = bp->port.supported;
a2fbb9ea 9441 } else {
c18487ee
YR
9442 u32 ext_phy_type =
9443 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9444
9445 if ((ext_phy_type ==
9446 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
9447 (ext_phy_type ==
9448 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 9449 /* force 10G, no AN */
c18487ee 9450 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 9451 bp->port.advertising =
a2fbb9ea
ET
9452 (ADVERTISED_10000baseT_Full |
9453 ADVERTISED_FIBRE);
9454 break;
9455 }
9456 BNX2X_ERR("NVRAM config error. "
9457 "Invalid link_config 0x%x"
9458 " Autoneg not supported\n",
34f80b04 9459 bp->port.link_config);
a2fbb9ea
ET
9460 return;
9461 }
9462 break;
9463
9464 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 9465 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 9466 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
9467 bp->port.advertising = (ADVERTISED_10baseT_Full |
9468 ADVERTISED_TP);
a2fbb9ea
ET
9469 } else {
9470 BNX2X_ERR("NVRAM config error. "
9471 "Invalid link_config 0x%x"
9472 " speed_cap_mask 0x%x\n",
34f80b04 9473 bp->port.link_config,
c18487ee 9474 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
9475 return;
9476 }
9477 break;
9478
9479 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 9480 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
9481 bp->link_params.req_line_speed = SPEED_10;
9482 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
9483 bp->port.advertising = (ADVERTISED_10baseT_Half |
9484 ADVERTISED_TP);
a2fbb9ea
ET
9485 } else {
9486 BNX2X_ERR("NVRAM config error. "
9487 "Invalid link_config 0x%x"
9488 " speed_cap_mask 0x%x\n",
34f80b04 9489 bp->port.link_config,
c18487ee 9490 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
9491 return;
9492 }
9493 break;
9494
9495 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 9496 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 9497 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
9498 bp->port.advertising = (ADVERTISED_100baseT_Full |
9499 ADVERTISED_TP);
a2fbb9ea
ET
9500 } else {
9501 BNX2X_ERR("NVRAM config error. "
9502 "Invalid link_config 0x%x"
9503 " speed_cap_mask 0x%x\n",
34f80b04 9504 bp->port.link_config,
c18487ee 9505 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
9506 return;
9507 }
9508 break;
9509
9510 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 9511 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
9512 bp->link_params.req_line_speed = SPEED_100;
9513 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
9514 bp->port.advertising = (ADVERTISED_100baseT_Half |
9515 ADVERTISED_TP);
a2fbb9ea
ET
9516 } else {
9517 BNX2X_ERR("NVRAM config error. "
9518 "Invalid link_config 0x%x"
9519 " speed_cap_mask 0x%x\n",
34f80b04 9520 bp->port.link_config,
c18487ee 9521 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
9522 return;
9523 }
9524 break;
9525
9526 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 9527 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 9528 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
9529 bp->port.advertising = (ADVERTISED_1000baseT_Full |
9530 ADVERTISED_TP);
a2fbb9ea
ET
9531 } else {
9532 BNX2X_ERR("NVRAM config error. "
9533 "Invalid link_config 0x%x"
9534 " speed_cap_mask 0x%x\n",
34f80b04 9535 bp->port.link_config,
c18487ee 9536 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
9537 return;
9538 }
9539 break;
9540
9541 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 9542 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 9543 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
9544 bp->port.advertising = (ADVERTISED_2500baseX_Full |
9545 ADVERTISED_TP);
a2fbb9ea
ET
9546 } else {
9547 BNX2X_ERR("NVRAM config error. "
9548 "Invalid link_config 0x%x"
9549 " speed_cap_mask 0x%x\n",
34f80b04 9550 bp->port.link_config,
c18487ee 9551 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
9552 return;
9553 }
9554 break;
9555
9556 case PORT_FEATURE_LINK_SPEED_10G_CX4:
9557 case PORT_FEATURE_LINK_SPEED_10G_KX4:
9558 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 9559 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 9560 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
9561 bp->port.advertising = (ADVERTISED_10000baseT_Full |
9562 ADVERTISED_FIBRE);
a2fbb9ea
ET
9563 } else {
9564 BNX2X_ERR("NVRAM config error. "
9565 "Invalid link_config 0x%x"
9566 " speed_cap_mask 0x%x\n",
34f80b04 9567 bp->port.link_config,
c18487ee 9568 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
9569 return;
9570 }
9571 break;
9572
9573 default:
9574 BNX2X_ERR("NVRAM config error. "
9575 "BAD link speed link_config 0x%x\n",
34f80b04 9576 bp->port.link_config);
c18487ee 9577 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 9578 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
9579 break;
9580 }
a2fbb9ea 9581
34f80b04
EG
9582 bp->link_params.req_flow_ctrl = (bp->port.link_config &
9583 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 9584 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 9585 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 9586 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 9587
c18487ee 9588 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 9589 " advertising 0x%x\n",
c18487ee
YR
9590 bp->link_params.req_line_speed,
9591 bp->link_params.req_duplex,
34f80b04 9592 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
9593}
9594
e665bfda
MC
9595static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
9596{
9597 mac_hi = cpu_to_be16(mac_hi);
9598 mac_lo = cpu_to_be32(mac_lo);
9599 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
9600 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
9601}
9602
34f80b04 9603static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 9604{
34f80b04
EG
9605 int port = BP_PORT(bp);
9606 u32 val, val2;
589abe3a 9607 u32 config;
c2c8b03e 9608 u16 i;
01cd4528 9609 u32 ext_phy_type;
a2fbb9ea 9610
c18487ee 9611 bp->link_params.bp = bp;
34f80b04 9612 bp->link_params.port = port;
c18487ee 9613
c18487ee 9614 bp->link_params.lane_config =
a2fbb9ea 9615 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 9616 bp->link_params.ext_phy_config =
a2fbb9ea
ET
9617 SHMEM_RD(bp,
9618 dev_info.port_hw_config[port].external_phy_config);
4d295db0
EG
9619 /* BCM8727_NOC => BCM8727 no over current */
9620 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9621 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
9622 bp->link_params.ext_phy_config &=
9623 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
9624 bp->link_params.ext_phy_config |=
9625 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
9626 bp->link_params.feature_config_flags |=
9627 FEATURE_CONFIG_BCM8727_NOC;
9628 }
9629
c18487ee 9630 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
9631 SHMEM_RD(bp,
9632 dev_info.port_hw_config[port].speed_capability_mask);
9633
34f80b04 9634 bp->port.link_config =
a2fbb9ea
ET
9635 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
9636
c2c8b03e
EG
9637 /* Get the XGXS config (RX and TX) for the 4 lanes */
9638 for (i = 0; i < 2; i++) {
9639 val = SHMEM_RD(bp,
9640 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
9641 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
9642 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
9643
9644 val = SHMEM_RD(bp,
9645 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
9646 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
9647 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
9648 }
9649
3ce2c3f9
EG
9650 /* If the device is capable of WoL, set the default state according
9651 * to the HW
9652 */
4d295db0 9653 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
3ce2c3f9
EG
9654 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
9655 (config & PORT_FEATURE_WOL_ENABLED));
9656
c2c8b03e
EG
9657 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
9658 " speed_cap_mask 0x%08x link_config 0x%08x\n",
c18487ee
YR
9659 bp->link_params.lane_config,
9660 bp->link_params.ext_phy_config,
34f80b04 9661 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 9662
4d295db0
EG
9663 bp->link_params.switch_cfg |= (bp->port.link_config &
9664 PORT_FEATURE_CONNECTED_SWITCH_MASK);
c18487ee 9665 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
9666
9667 bnx2x_link_settings_requested(bp);
9668
01cd4528
EG
9669 /*
9670 * If connected directly, work with the internal PHY; otherwise, work
9671 * with the external PHY
9672 */
9673 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9674 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
9675 bp->mdio.prtad = bp->link_params.phy_addr;
9676
9677 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
9678 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
9679 bp->mdio.prtad =
659bc5c4 9680 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
01cd4528 9681
a2fbb9ea
ET
9682 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
9683 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
e665bfda 9684 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
c18487ee
YR
9685 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
9686 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
37b091ba
MC
9687
9688#ifdef BCM_CNIC
9689 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
9690 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
9691 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
9692#endif
34f80b04
EG
9693}
9694
9695static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
9696{
9697 int func = BP_FUNC(bp);
9698 u32 val, val2;
9699 int rc = 0;
a2fbb9ea 9700
34f80b04 9701 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 9702
34f80b04
EG
9703 bp->e1hov = 0;
9704 bp->e1hmf = 0;
2145a920 9705 if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
34f80b04
EG
9706 bp->mf_config =
9707 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 9708
2691d51d 9709 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
3196a88a 9710 FUNC_MF_CFG_E1HOV_TAG_MASK);
2691d51d 9711 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
34f80b04 9712 bp->e1hmf = 1;
2691d51d
EG
9713 BNX2X_DEV_INFO("%s function mode\n",
9714 IS_E1HMF(bp) ? "multi" : "single");
9715
9716 if (IS_E1HMF(bp)) {
9717 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
9718 e1hov_tag) &
9719 FUNC_MF_CFG_E1HOV_TAG_MASK);
9720 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
9721 bp->e1hov = val;
9722 BNX2X_DEV_INFO("E1HOV for func %d is %d "
9723 "(0x%04x)\n",
9724 func, bp->e1hov, bp->e1hov);
9725 } else {
34f80b04
EG
9726 BNX2X_ERR("!!! No valid E1HOV for func %d,"
9727 " aborting\n", func);
9728 rc = -EPERM;
9729 }
2691d51d
EG
9730 } else {
9731 if (BP_E1HVN(bp)) {
9732 BNX2X_ERR("!!! VN %d in single function mode,"
9733 " aborting\n", BP_E1HVN(bp));
9734 rc = -EPERM;
9735 }
34f80b04
EG
9736 }
9737 }
a2fbb9ea 9738
34f80b04
EG
9739 if (!BP_NOMCP(bp)) {
9740 bnx2x_get_port_hwinfo(bp);
9741
9742 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
9743 DRV_MSG_SEQ_NUMBER_MASK);
9744 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9745 }
9746
9747 if (IS_E1HMF(bp)) {
9748 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
9749 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
9750 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
9751 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
9752 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
9753 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
9754 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
9755 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
9756 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
9757 bp->dev->dev_addr[5] = (u8)(val & 0xff);
9758 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
9759 ETH_ALEN);
9760 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
9761 ETH_ALEN);
a2fbb9ea 9762 }
34f80b04
EG
9763
9764 return rc;
a2fbb9ea
ET
9765 }
9766
34f80b04
EG
9767 if (BP_NOMCP(bp)) {
9768 /* only supposed to happen on emulation/FPGA */
33471629 9769 BNX2X_ERR("warning random MAC workaround active\n");
34f80b04
EG
9770 random_ether_addr(bp->dev->dev_addr);
9771 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
9772 }
a2fbb9ea 9773
34f80b04
EG
9774 return rc;
9775}
9776
34f24c7f
VZ
9777static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
9778{
9779 int cnt, i, block_end, rodi;
9780 char vpd_data[BNX2X_VPD_LEN+1];
9781 char str_id_reg[VENDOR_ID_LEN+1];
9782 char str_id_cap[VENDOR_ID_LEN+1];
9783 u8 len;
9784
9785 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
9786 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
9787
9788 if (cnt < BNX2X_VPD_LEN)
9789 goto out_not_found;
9790
9791 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
9792 PCI_VPD_LRDT_RO_DATA);
9793 if (i < 0)
9794 goto out_not_found;
9795
9796
9797 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
9798 pci_vpd_lrdt_size(&vpd_data[i]);
9799
9800 i += PCI_VPD_LRDT_TAG_SIZE;
9801
9802 if (block_end > BNX2X_VPD_LEN)
9803 goto out_not_found;
9804
9805 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9806 PCI_VPD_RO_KEYWORD_MFR_ID);
9807 if (rodi < 0)
9808 goto out_not_found;
9809
9810 len = pci_vpd_info_field_size(&vpd_data[rodi]);
9811
9812 if (len != VENDOR_ID_LEN)
9813 goto out_not_found;
9814
9815 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9816
9817 /* vendor specific info */
9818 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
9819 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
9820 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
9821 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
9822
9823 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9824 PCI_VPD_RO_KEYWORD_VENDOR0);
9825 if (rodi >= 0) {
9826 len = pci_vpd_info_field_size(&vpd_data[rodi]);
9827
9828 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9829
9830 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
9831 memcpy(bp->fw_ver, &vpd_data[rodi], len);
9832 bp->fw_ver[len] = ' ';
9833 }
9834 }
9835 return;
9836 }
9837out_not_found:
9838 return;
9839}
9840
34f80b04
EG
9841static int __devinit bnx2x_init_bp(struct bnx2x *bp)
9842{
9843 int func = BP_FUNC(bp);
87942b46 9844 int timer_interval;
34f80b04
EG
9845 int rc;
9846
da5a662a
VZ
9847 /* Disable interrupt handling until HW is initialized */
9848 atomic_set(&bp->intr_sem, 1);
e1510706 9849 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
da5a662a 9850
34f80b04 9851 mutex_init(&bp->port.phy_mutex);
c4ff7cbf 9852 mutex_init(&bp->fw_mb_mutex);
993ac7b5
MC
9853#ifdef BCM_CNIC
9854 mutex_init(&bp->cnic_mutex);
9855#endif
a2fbb9ea 9856
1cf167f2 9857 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
72fd0718 9858 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
34f80b04
EG
9859
9860 rc = bnx2x_get_hwinfo(bp);
9861
34f24c7f 9862 bnx2x_read_fwinfo(bp);
34f80b04
EG
9863 /* need to reset chip if undi was active */
9864 if (!BP_NOMCP(bp))
9865 bnx2x_undi_unload(bp);
9866
9867 if (CHIP_REV_IS_FPGA(bp))
7995c64e 9868 pr_err("FPGA detected\n");
34f80b04
EG
9869
9870 if (BP_NOMCP(bp) && (func == 0))
7995c64e 9871 pr_err("MCP disabled, must load devices in order!\n");
34f80b04 9872
555f6c78 9873 /* Set multi queue mode */
8badd27a
EG
9874 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
9875 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
7995c64e 9876 pr_err("Multi disabled since int_mode requested is not MSI-X\n");
555f6c78
EG
9877 multi_mode = ETH_RSS_MODE_DISABLED;
9878 }
9879 bp->multi_mode = multi_mode;
9880
9881
4fd89b7a
DK
9882 bp->dev->features |= NETIF_F_GRO;
9883
7a9b2557
VZ
9884 /* Set TPA flags */
9885 if (disable_tpa) {
9886 bp->flags &= ~TPA_ENABLE_FLAG;
9887 bp->dev->features &= ~NETIF_F_LRO;
9888 } else {
9889 bp->flags |= TPA_ENABLE_FLAG;
9890 bp->dev->features |= NETIF_F_LRO;
9891 }
9892
a18f5128
EG
9893 if (CHIP_IS_E1(bp))
9894 bp->dropless_fc = 0;
9895 else
9896 bp->dropless_fc = dropless_fc;
9897
8d5726c4 9898 bp->mrrs = mrrs;
7a9b2557 9899
34f80b04
EG
9900 bp->tx_ring_size = MAX_TX_AVAIL;
9901 bp->rx_ring_size = MAX_RX_AVAIL;
9902
9903 bp->rx_csum = 1;
34f80b04 9904
7d323bfd
EG
9905 /* make sure that the numbers are in the right granularity */
9906 bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
9907 bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
34f80b04 9908
87942b46
EG
9909 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
9910 bp->current_interval = (poll ? poll : timer_interval);
34f80b04
EG
9911
9912 init_timer(&bp->timer);
9913 bp->timer.expires = jiffies + bp->current_interval;
9914 bp->timer.data = (unsigned long) bp;
9915 bp->timer.function = bnx2x_timer;
9916
9917 return rc;
a2fbb9ea
ET
9918}
9919
9920/*
9921 * ethtool service functions
9922 */
9923
9924/* All ethtool functions called with rtnl_lock */
9925
9926static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9927{
9928 struct bnx2x *bp = netdev_priv(dev);
9929
34f80b04
EG
9930 cmd->supported = bp->port.supported;
9931 cmd->advertising = bp->port.advertising;
a2fbb9ea 9932
f34d28ea
EG
9933 if ((bp->state == BNX2X_STATE_OPEN) &&
9934 !(bp->flags & MF_FUNC_DIS) &&
9935 (bp->link_vars.link_up)) {
c18487ee
YR
9936 cmd->speed = bp->link_vars.line_speed;
9937 cmd->duplex = bp->link_vars.duplex;
b015e3d1
EG
9938 if (IS_E1HMF(bp)) {
9939 u16 vn_max_rate;
34f80b04 9940
b015e3d1
EG
9941 vn_max_rate =
9942 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
34f80b04 9943 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
b015e3d1
EG
9944 if (vn_max_rate < cmd->speed)
9945 cmd->speed = vn_max_rate;
9946 }
9947 } else {
9948 cmd->speed = -1;
9949 cmd->duplex = -1;
34f80b04 9950 }
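In MF mode the reported speed above is clamped to the per-function max bandwidth carried in mf_config. A sketch of that computation; the mask/shift values are illustrative stand-ins for FUNC_MF_CFG_MAX_BW_MASK/SHIFT:

#include <stdint.h>
#include <stdio.h>

#define MAX_BW_MASK	0xffff0000u	/* illustrative stand-in */
#define MAX_BW_SHIFT	16		/* illustrative stand-in */

/* Editor's sketch, not part of the driver: clamp line speed to the
 * per-function bandwidth limit (encoded in units of 100 Mb/s). */
int main(void)
{
	uint32_t mf_config = 25u << MAX_BW_SHIFT;	/* 25 -> 2500 Mb/s */
	uint16_t vn_max_rate =
		((mf_config & MAX_BW_MASK) >> MAX_BW_SHIFT) * 100;
	int speed = 10000;				/* link line speed */

	if (vn_max_rate < speed)
		speed = vn_max_rate;
	printf("reported speed %d\n", speed);		/* 2500 */
	return 0;
}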
a2fbb9ea 9951
c18487ee
YR
9952 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
9953 u32 ext_phy_type =
9954 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
9955
9956 switch (ext_phy_type) {
9957 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
f1410647 9958 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 9959 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
589abe3a
EG
9960 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9961 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9962 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4d295db0 9963 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
f1410647
ET
9964 cmd->port = PORT_FIBRE;
9965 break;
9966
9967 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
28577185 9968 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
f1410647
ET
9969 cmd->port = PORT_TP;
9970 break;
9971
c18487ee
YR
9972 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9973 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9974 bp->link_params.ext_phy_config);
9975 break;
9976
f1410647
ET
9977 default:
9978 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
9979 bp->link_params.ext_phy_config);
9980 break;
f1410647
ET
9981 }
9982 } else
a2fbb9ea 9983 cmd->port = PORT_TP;
a2fbb9ea 9984
01cd4528 9985 cmd->phy_address = bp->mdio.prtad;
a2fbb9ea
ET
9986 cmd->transceiver = XCVR_INTERNAL;
9987
c18487ee 9988 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 9989 cmd->autoneg = AUTONEG_ENABLE;
f1410647 9990 else
a2fbb9ea 9991 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
9992
9993 cmd->maxtxpkt = 0;
9994 cmd->maxrxpkt = 0;
9995
9996 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9997 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
9998 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
9999 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
10000 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
10001 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
10002 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
10003
10004 return 0;
10005}
10006
10007static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10008{
10009 struct bnx2x *bp = netdev_priv(dev);
10010 u32 advertising;
10011
34f80b04
EG
10012 if (IS_E1HMF(bp))
10013 return 0;
10014
a2fbb9ea
ET
10015 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
10016 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
10017 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
10018 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
10019 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
10020 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
10021 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
10022
a2fbb9ea 10023 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
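
/*
 * Illustrative sketch (not part of the driver): the forced-speed branch
 * above boils down to "check the requested (speed, duplex) pair against
 * the port's supported mask, then derive an advertising word".  A
 * table-driven equivalent, assuming the same SUPPORTED_x / ADVERTISED_x
 * ethtool constants, could look like this:
 */
#if 0	/* example only, never compiled */
static const struct {
	u32 speed;		/* ethtool SPEED_x */
	u8  duplex;		/* ethtool DUPLEX_x */
	u32 supported;		/* required SUPPORTED_x bit */
	u32 advertising;	/* resulting ADVERTISED_x word */
} example_speed_map[] = {
	{ SPEED_10,    DUPLEX_FULL, SUPPORTED_10baseT_Full,
			ADVERTISED_10baseT_Full | ADVERTISED_TP },
	{ SPEED_10,    DUPLEX_HALF, SUPPORTED_10baseT_Half,
			ADVERTISED_10baseT_Half | ADVERTISED_TP },
	{ SPEED_10000, DUPLEX_FULL, SUPPORTED_10000baseT_Full,
			ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE },
	/* ... remaining rows elided ... */
};
#endif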

#define IS_E1_ONLINE(info)	(((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
#define IS_E1H_ONLINE(info)	(((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)

static int bnx2x_get_regs_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int regdump_len = 0;
	int i;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1; i++)
			if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
				regdump_len += wreg_addrs_e1[i].size *
					(1 + wreg_addrs_e1[i].read_regs_count);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1H; i++)
			if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
				regdump_len += wreg_addrs_e1h[i].size *
					(1 + wreg_addrs_e1h[i].read_regs_count);
	}
	regdump_len *= 4;
	regdump_len += sizeof(struct dump_hdr);

	return regdump_len;
}

static void bnx2x_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, j;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_hdr dump_hdr = {0};

	regs->version = 0;
	memset(p, 0, regs->len);

	if (!netif_running(bp->dev))
		return;

	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
	dump_hdr.dump_sign = dump_sign_all;
	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
	dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
	dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
	dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
	dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;

	memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
	p += dump_hdr.hdr_size + 1;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);
	}
}
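
/*
 * Illustrative sketch (assumption, not driver code): a consumer of the
 * ethtool register dump written by bnx2x_get_regs() would skip the
 * header (hdr_size is in 32-bit words, minus the leading word itself)
 * and walk the raw register words packed behind it.
 */
#if 0	/* example only */
static void bnx2x_example_walk_dump(const void *dump, u32 len_bytes)
{
	const struct dump_hdr *hdr = dump;
	const u32 *reg = (const u32 *)dump + hdr->hdr_size + 1;
	const u32 *end = (const u32 *)((const u8 *)dump + len_bytes);

	while (reg < end)
		(void)*reg++;	/* each word is one REG_RD() snapshot */
}
#endif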

#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	strncpy(info->fw_version, bp->fw_ver, 32);
	snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
		 "bc %d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = bnx2x_get_regs_len(dev);
}

static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}

static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msg_enable;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msg_enable = level;
}

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static u32 bnx2x_get_link(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & MF_FUNC_DIS)
		return 0;

	return bp->link_vars.link_up;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}

static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}
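
/*
 * Illustrative sketch (assumption, not driver code): every NVRAM access
 * in this file follows the same bracket -- take the per-port arbitration
 * lock, enable the interface, do the work, then undo both in reverse
 * order -- as the read/write helpers further down show.
 */
#if 0	/* example only */
static int bnx2x_example_nvram_op(struct bnx2x *bp)
{
	int rc = bnx2x_acquire_nvram_lock(bp);

	if (rc)
		return rc;
	bnx2x_enable_nvram_access(bp);

	/* ... dword reads/writes go here ... */

	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);
	return rc;
}
#endif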

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}

static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}

static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
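
/*
 * Illustrative sketch (not driver code): reading the 4-byte NVRAM
 * "bootstrap" magic through bnx2x_nvram_read().  The dwords come back
 * big-endian, exactly as bnx2x_test_nvram() consumes them further down
 * (it expects 0x669955aa after be32_to_cpu()).
 */
#if 0	/* example only */
static int bnx2x_example_read_magic(struct bnx2x *bp, u32 *magic)
{
	__be32 raw;
	int rc;

	rc = bnx2x_nvram_read(bp, 0, (u8 *)&raw, sizeof(raw));
	if (rc == 0)
		*magic = be32_to_cpu(raw);
	return rc;
}
#endif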

static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}

#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
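
/*
 * Worked example (editorial; assumes NVRAM_PAGE_SIZE == 256 for the
 * arithmetic): for a 12-byte write starting at offset 0xf8, the loop
 * above tags the dwords
 *   0xf8  -> FIRST  (initial value of cmd_flags)
 *   0xfc  -> LAST   ((offset + 4) lands on the page boundary)
 *   0x100 -> LAST   (final dword; note the else-if chain means the
 *                    new-page FIRST branch is not reached here)
 * so each flash page sees its own FIRST...LAST burst.
 */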

static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int rc = 0;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* PHY eeprom can be accessed only by the PMF */
	if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
	    !bp->port.pmf)
		return -EINVAL;

	if (eeprom->magic == 0x50485950) {
		/* 'PHYP' (0x50485950): prepare phy for FW upgrade */
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

		bnx2x_acquire_phy_lock(bp);
		rc |= bnx2x_link_reset(&bp->link_params,
				       &bp->link_vars, 0);
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_HIGH, port);
		bnx2x_release_phy_lock(bp);
		bnx2x_link_report(bp);

	} else if (eeprom->magic == 0x50485952) {
		/* 'PHYR' (0x50485952): re-init link after FW upgrade */
		if (bp->state == BNX2X_STATE_OPEN) {
			bnx2x_acquire_phy_lock(bp);
			rc |= bnx2x_link_reset(&bp->link_params,
					       &bp->link_vars, 1);

			rc |= bnx2x_phy_init(&bp->link_params,
					     &bp->link_vars);
			bnx2x_release_phy_lock(bp);
			bnx2x_calc_fc_adv(bp);
		}
	} else if (eeprom->magic == 0x53985943) {
		/* 'PHYC' (0x53985943): PHY FW upgrade completed */
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
			u8 ext_phy_addr =
			     XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

			/* DSP Remove Download Mode */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_LOW, port);

			bnx2x_acquire_phy_lock(bp);

			bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);

			/* wait 0.5 sec to allow it to run */
			msleep(500);
			bnx2x_ext_phy_hw_reset(bp, port);
			msleep(500);
			bnx2x_release_phy_lock(bp);
		}
	} else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

#define BNX2X_MAX_COALES_TOUT  (0xf0*12) /* Maximal coalescing timeout in us */
static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
		bp->rx_ticks = BNX2X_MAX_COALES_TOUT;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
		bp->tx_ticks = BNX2X_MAX_COALES_TOUT;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}
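
/*
 * Editorial note: the lower bound above keeps the TX ring large enough
 * for one maximally fragmented skb -- MAX_SKB_FRAGS data BDs plus a few
 * control/parsing BDs.  So, assuming the usual MAX_SKB_FRAGS of 18,
 * "ethtool -G <if> tx N" with N <= 22 is rejected with -EINVAL.
 */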

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
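
/*
 * Worked example (editorial, derived from the flag dance above):
 *   rx=0 tx=0            -> request stays AUTO alone and is reset to NONE;
 *   rx and/or tx set     -> the RX/TX bits are ORed on top of AUTO;
 *   autoneg on, and the line speed is SPEED_AUTO_NEG
 *                        -> the request is forced back to plain AUTO so
 *                           the negotiated pause result wins.
 */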

static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!disable_tpa) {
			if (!(dev->features & NETIF_F_LRO)) {
				dev->features |= NETIF_F_LRO;
				bp->flags |= TPA_ENABLE_FLAG;
				changed = 1;
			}
		} else
			rc = -EINVAL;
	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	bp->rx_csum = data;

	/* Disable TPA when Rx CSUM is disabled; otherwise all
	   TPA'ed packets will be discarded due to a wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}

static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};

static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
		{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
/* 20 */	{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
		{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
/* 30 */	{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify that the value is as expected */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}
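
/*
 * Illustrative sketch (not driver code): the core of the register test
 * above as a standalone helper -- save, write a pattern, read back,
 * restore, and compare through the writable-bits mask.
 */
#if 0	/* example only */
static bool bnx2x_example_reg_sticks(struct bnx2x *bp, u32 offset,
				     u32 mask, u32 pattern)
{
	u32 save_val = REG_RD(bp, offset);
	u32 val;

	REG_WR(bp, offset, pattern);
	val = REG_RD(bp, offset);
	REG_WR(bp, offset, save_val);	/* always restore */

	return (val & mask) == (pattern & mask);
}
#endif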

static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}
11161
f3c87cdd
YG
11162static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
11163{
11164 int cnt = 1000;
11165
11166 if (link_up)
11167 while (bnx2x_link_test(bp) && cnt--)
11168 msleep(10);
11169}

static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
	struct bnx2x_fastpath *fp_tx = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod, bd_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, ETH_ALEN);
	memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);

	pkt_prod = fp_tx->tx_pkt_prod++;
	tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp_tx->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	bd_prod = TX_BD(fp_tx->tx_bd_prod);
	tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_start_bd->vlan = cpu_to_le16(pkt_prod);
	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	wmb();

	fp_tx->tx_db.data.prod += 2;
	barrier();
	DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);

	mmiowb();

	num_pkts++;
	fp_tx->tx_bd_prod += 2; /* start + pbd */

	udelay(100);

	tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	/* "FALGS" is the spelling of the macro in the driver headers */
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
	fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
	fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
	fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
			     fp_rx->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}

static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0, res;

	if (BP_NOMCP(bp))
		return rc;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}

#define CRC32_RESIDUAL			0xdebb20e3

static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	__be32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, crc;

	if (BP_NOMCP(bp))
		return 0;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc %d)\n", i, rc);
			goto test_nvram_exit;
		}

		crc = ether_crc_le(nvram_tbl[i].size, data);
		if (crc != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}
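
/*
 * Editorial note on CRC32_RESIDUAL: each nvram_tbl[] region stores its
 * own CRC32 in its last four bytes, so running ether_crc_le() over the
 * whole region (payload plus stored CRC) must yield the fixed CRC-32
 * residue 0xdebb20e3 -- the same "magic remainder" trick Ethernet uses
 * to validate the FCS without ever extracting it from the frame.
 */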

static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		/* use last unicast entries */
		config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	bp->set_mac_pending++;
	smp_wmb();
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			smp_rmb();
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}

static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		etest->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int port = BP_PORT(bp);
		u32 val;
		u8 link_up;

		/* save current value of input enable for TX port IF */
		val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
		/* disable input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);

		link_up = (bnx2x_link_test(bp) == 0);
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);

		/* restore input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);

		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}
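
/*
 * Editorial summary (derived from the code above): the self-test result
 * slots line up with bnx2x_tests_str_arr[] --
 *   buf[0] register_test, buf[1] memory_test, buf[2] loopback_test,
 *   buf[3] nvram_test, buf[4] interrupt_test, buf[5] link_test;
 * the last entry ("idle check") is not set in this function.
 */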

static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
						8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
						8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
						8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
						8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi),	8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi),	8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
							8, "[%d]: tx_packets" }
};

static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};

#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))

static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
			if (!IS_E1HMF_MODE_STAT(bp))
				num_stats += BNX2X_NUM_STATS;
		} else {
			if (IS_E1HMF_MODE_STAT(bp)) {
				num_stats = 0;
				for (i = 0; i < BNX2X_NUM_STATS; i++)
					if (IS_FUNC_STAT(i))
						num_stats++;
			} else
				num_stats = BNX2X_NUM_STATS;
		}
		return num_stats;

	case ETH_SS_TEST:
		return BNX2X_NUM_TESTS;

	default:
		return -EINVAL;
	}
}

static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j, k;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			k = 0;
			for_each_queue(bp, i) {
				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
					sprintf(buf + (k + j)*ETH_GSTRING_LEN,
						bnx2x_q_stats_arr[j].string, i);
				k += BNX2X_NUM_Q_STATS;
			}
			if (IS_E1HMF_MODE_STAT(bp))
				break;
			for (j = 0; j < BNX2X_NUM_STATS; j++)
				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[j].string);
		} else {
			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
				if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
					continue;
				strcpy(buf + j*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[i].string);
				j++;
			}
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}

static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k;

	if (is_multi(bp)) {
		k = 0;
		for_each_queue(bp, i) {
			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
		if (IS_E1HMF_MODE_STAT(bp))
			return;
		hw_stats = (u32 *)&bp->eth_stats;
		for (j = 0; j < BNX2X_NUM_STATS; j++) {
			if (bnx2x_stats_arr[j].size == 0) {
				/* skip this counter */
				buf[k + j] = 0;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[j].offset);
			if (bnx2x_stats_arr[j].size == 4) {
				/* 4-byte counter */
				buf[k + j] = (u64) *offset;
				continue;
			}
			/* 8-byte counter */
			buf[k + j] = HILO_U64(*offset, *(offset + 1));
		}
	} else {
		hw_stats = (u32 *)&bp->eth_stats;
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
				continue;
			if (bnx2x_stats_arr[i].size == 0) {
				/* skip this counter */
				buf[j] = 0;
				j++;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[i].offset);
			if (bnx2x_stats_arr[i].size == 4) {
				/* 4-byte counter */
				buf[j] = (u64) *offset;
				j++;
				continue;
			}
			/* 8-byte counter */
			buf[j] = HILO_U64(*offset, *(offset + 1));
			j++;
		}
	}
}
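
/*
 * Illustrative sketch (assumption about HILO_U64's definition, which
 * lives in a driver header): the 8-byte counters above are stored as
 * two consecutive u32 words, high word first, so the 64-bit value is
 * reassembled roughly like this:
 */
#if 0	/* example only */
#define EXAMPLE_HILO_U64(hi, lo)	(((u64)(hi) << 32) + (lo))
#endif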

static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

	if (!netif_running(dev))
		return 0;

	if (!bp->port.pmf)
		return 0;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
				      SPEED_1000);
		else
			bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	if (bp->link_vars.link_up)
		bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
			      bp->link_vars.line_speed);

	return 0;
}

static const struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_regs_len		= bnx2x_get_regs_len,
	.get_regs		= bnx2x_get_regs,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= bnx2x_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test		= bnx2x_self_test,
	.get_sset_count		= bnx2x_get_sset_count,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */

/****************************************************************************
* General service functions
****************************************************************************/
11852
11853static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
11854{
11855 u16 pmcsr;
11856
11857 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
11858
11859 switch (state) {
11860 case PCI_D0:
34f80b04 11861 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
11862 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
11863 PCI_PM_CTRL_PME_STATUS));
11864
11865 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
33471629 11866 /* delay required during transition out of D3hot */
a2fbb9ea 11867 msleep(20);
34f80b04 11868 break;
a2fbb9ea 11869
11870 case PCI_D3hot:
11871 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11872 pmcsr |= 3;
a2fbb9ea 11873
11874 if (bp->wol)
11875 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 11876
11877 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
11878 pmcsr);
a2fbb9ea 11879
11880 /* No more memory access after this point until
11881 * device is brought back to D0.
11882 */
11883 break;
11884
11885 default:
11886 return -EINVAL;
11887 }
11888 return 0;
11889}
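/* Sketch of the D3hot encoding above, assuming the standard PCI PM
 * register layout: the low two bits of PMCSR select the power state,
 * so 'pmcsr |= 3' writes the D3hot code into PCI_PM_CTRL_STATE_MASK.
 * A more self-documenting (hypothetical) spelling would be:
 *
 *	pmcsr |= (PCI_D3hot & PCI_PM_CTRL_STATE_MASK);
 *
 * which relies on PCI_D3hot being 3 in the kernel's pci_power_t
 * encoding.
 */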
11890
11891static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
11892{
11893 u16 rx_cons_sb;
11894
11895 /* Tell compiler that status block fields can change */
11896 barrier();
11897 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
11898 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
11899 rx_cons_sb++;
11900 return (fp->rx_comp_cons != rx_cons_sb);
11901}
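/* Why the increment above: the last entry of each RCQ page is a
 * "next page" pointer rather than a real completion, so when the
 * status-block consumer lands exactly on MAX_RCQ_DESC_CNT the index
 * is bumped past it before being compared with rx_comp_cons.
 * Illustrative arithmetic (assuming a 128-entry RCQ page, i.e.
 * MAX_RCQ_DESC_CNT == 127):
 *
 *	rx_cons_sb == 127 -> treated as 128, the first real entry of
 *	the next page
 */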
11902
11903/*
11904 * net_device service functions
11905 */
11906
11907static int bnx2x_poll(struct napi_struct *napi, int budget)
11908{
54b9ddaa 11909 int work_done = 0;
11910 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
11911 napi);
11912 struct bnx2x *bp = fp->bp;
a2fbb9ea 11913
54b9ddaa 11914 while (1) {
a2fbb9ea 11915#ifdef BNX2X_STOP_ON_ERROR
11916 if (unlikely(bp->panic)) {
11917 napi_complete(napi);
11918 return 0;
11919 }
11920#endif
11921
11922 if (bnx2x_has_tx_work(fp))
11923 bnx2x_tx_int(fp);
356e2385 11924
11925 if (bnx2x_has_rx_work(fp)) {
11926 work_done += bnx2x_rx_int(fp, budget - work_done);
a2fbb9ea 11927
11928 /* must not complete if we consumed full budget */
11929 if (work_done >= budget)
11930 break;
11931 }
a2fbb9ea 11932
11933 /* Fall out from the NAPI loop if needed */
11934 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
11935 bnx2x_update_fpsb_idx(fp);
11936 /* bnx2x_has_rx_work() reads the status block, thus we need
11937 * to ensure that status block indices have been actually read
11938 * (bnx2x_update_fpsb_idx) prior to this check
11939 * (bnx2x_has_rx_work) so that we won't write the "newer"
11940 * value of the status block to IGU (if there was a DMA right
11941 * after bnx2x_has_rx_work and if there is no rmb, the memory
11942 * reading (bnx2x_update_fpsb_idx) may be postponed to right
11943 * before bnx2x_ack_sb). In this case there will never be
11944 * another interrupt until there is another update of the
11945 * status block, while there is still unhandled work.
11946 */
11947 rmb();
a2fbb9ea 11948
11949 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
11950 napi_complete(napi);
11951 /* Re-enable interrupts */
11952 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
11953 le16_to_cpu(fp->fp_c_idx),
11954 IGU_INT_NOP, 1);
11955 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
11956 le16_to_cpu(fp->fp_u_idx),
11957 IGU_INT_ENABLE, 1);
11958 break;
11959 }
11960 }
a2fbb9ea 11961 }
356e2385 11962
11963 return work_done;
11964}
11965
11966
11967/* we split the first BD into headers and data BDs
33471629 11968 * to ease the pain of our fellow microcode engineers
11969 * we use one mapping for both BDs
11970 * So far this has only been observed to happen
11971 * in Other Operating Systems(TM)
11972 */
11973static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
11974 struct bnx2x_fastpath *fp,
11975 struct sw_tx_bd *tx_buf,
11976 struct eth_tx_start_bd **tx_bd, u16 hlen,
11977 u16 bd_prod, int nbd)
11978{
ca00392c 11979 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
11980 struct eth_tx_bd *d_tx_bd;
11981 dma_addr_t mapping;
11982 int old_len = le16_to_cpu(h_tx_bd->nbytes);
11983
11984 /* first fix first BD */
11985 h_tx_bd->nbd = cpu_to_le16(nbd);
11986 h_tx_bd->nbytes = cpu_to_le16(hlen);
11987
11988 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
11989 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
11990 h_tx_bd->addr_lo, h_tx_bd->nbd);
11991
11992 /* now get a new data BD
11993 * (after the pbd) and fill it */
11994 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
ca00392c 11995 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11996
11997 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
11998 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
11999
12000 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12001 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12002 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
12003
12004 /* this marks the BD as one that has no individual mapping */
12005 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
12006
12007 DP(NETIF_MSG_TX_QUEUED,
12008 "TSO split data size is %d (%x:%x)\n",
12009 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
12010
12011 /* update tx_bd */
12012 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
12013
12014 return bd_prod;
12015}
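/* Worked example of the split above (hypothetical sizes): for a TSO
 * skb whose linear part is 200 bytes but whose headers are only
 * hlen == 66 bytes, the start BD is trimmed to 66 bytes and a new data
 * BD is chained in after the PBD for the remaining 134 bytes, reusing
 * the original DMA mapping at offset 66. No second dma_map_single() is
 * needed, and BNX2X_TSO_SPLIT_BD tells the completion path not to
 * unmap that second BD separately.
 */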
12016
12017static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
12018{
12019 if (fix > 0)
12020 csum = (u16) ~csum_fold(csum_sub(csum,
12021 csum_partial(t_header - fix, fix, 0)));
12022
12023 else if (fix < 0)
12024 csum = (u16) ~csum_fold(csum_add(csum,
12025 csum_partial(t_header, -fix, 0)));
12026
12027 return swab16(csum);
12028}
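/* Sketch of what bnx2x_csum_fix() compensates for: the stack computed
 * the partial checksum starting at a point that is 'fix' bytes away
 * from where the HW expects it (the transport header). A positive fix
 * folds those extra bytes back out of the sum with csum_sub(), a
 * negative fix adds the missing bytes with csum_add(), and swab16()
 * puts the result in the byte order the parsing BD wants. E.g.
 * (hypothetical): fix == 2 means two leading bytes were included that
 * the HW will cover itself, so they are subtracted once.
 */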
12029
12030static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
12031{
12032 u32 rc;
12033
12034 if (skb->ip_summed != CHECKSUM_PARTIAL)
12035 rc = XMIT_PLAIN;
12036
12037 else {
4781bfad 12038 if (skb->protocol == htons(ETH_P_IPV6)) {
12039 rc = XMIT_CSUM_V6;
12040 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
12041 rc |= XMIT_CSUM_TCP;
12042
12043 } else {
12044 rc = XMIT_CSUM_V4;
12045 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
12046 rc |= XMIT_CSUM_TCP;
12047 }
12048 }
12049
12050 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
d6a2f98b 12051 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
12052
12053 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
d6a2f98b 12054 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
12055
12056 return rc;
12057}
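/* Resulting flag combinations, read straight off the code above: a
 * HW-checksummed TCPv4 skb yields XMIT_CSUM_V4 | XMIT_CSUM_TCP, and a
 * TSO IPv4 skb additionally carries XMIT_GSO_V4. Typical use:
 *
 *	u32 xmit_type = bnx2x_xmit_type(bp, skb);
 *	if (xmit_type & XMIT_GSO)
 *		(build the LSO parsing BD, as bnx2x_start_xmit() does)
 */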
12058
632da4d6 12059#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
12060/* check if packet requires linearization (packet is too fragmented)
12061 no need to check fragmentation if page size > 8K (there will be no
12062 violation of FW restrictions) */
12063static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
12064 u32 xmit_type)
12065{
12066 int to_copy = 0;
12067 int hlen = 0;
12068 int first_bd_sz = 0;
12069
12070 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
12071 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
12072
12073 if (xmit_type & XMIT_GSO) {
12074 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
12075 /* Check if LSO packet needs to be copied:
12076 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
12077 int wnd_size = MAX_FETCH_BD - 3;
33471629 12078 /* Number of windows to check */
12079 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
12080 int wnd_idx = 0;
12081 int frag_idx = 0;
12082 u32 wnd_sum = 0;
12083
12084 /* Headers length */
12085 hlen = (int)(skb_transport_header(skb) - skb->data) +
12086 tcp_hdrlen(skb);
12087
12088 /* Amount of data (w/o headers) on linear part of SKB */
12089 first_bd_sz = skb_headlen(skb) - hlen;
12090
12091 wnd_sum = first_bd_sz;
12092
12093 /* Calculate the first sum - it's special */
12094 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
12095 wnd_sum +=
12096 skb_shinfo(skb)->frags[frag_idx].size;
12097
12098 /* If there was data on linear skb data - check it */
12099 if (first_bd_sz > 0) {
12100 if (unlikely(wnd_sum < lso_mss)) {
12101 to_copy = 1;
12102 goto exit_lbl;
12103 }
12104
12105 wnd_sum -= first_bd_sz;
12106 }
12107
12108 /* Others are easier: run through the frag list and
12109 check all windows */
12110 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
12111 wnd_sum +=
12112 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
12113
12114 if (unlikely(wnd_sum < lso_mss)) {
12115 to_copy = 1;
12116 break;
12117 }
12118 wnd_sum -=
12119 skb_shinfo(skb)->frags[wnd_idx].size;
12120 }
12121 } else {
12122 /* a non-LSO packet that is too fragmented should always
12123 be linearized */
12124 to_copy = 1;
12125 }
12126 }
12127
12128exit_lbl:
12129 if (unlikely(to_copy))
12130 DP(NETIF_MSG_TX_QUEUED,
12131 "Linearization IS REQUIRED for %s packet. "
12132 "num_frags %d hlen %d first_bd_sz %d\n",
12133 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
12134 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
12135
12136 return to_copy;
12137}
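/* Worked example of the window check above (hypothetical sizes,
 * assuming MAX_FETCH_BD == 13, so wnd_size == 10): an LSO skb with
 * 12 frags and lso_mss == 1500 passes only if every run of 10
 * consecutive BDs carries at least 1500 bytes; if any window sums to
 * less than one MSS the function returns 1 and the caller linearizes
 * the skb with skb_linearize() before transmitting.
 */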
632da4d6 12138#endif
12139
12140/* called with netif_tx_lock
a2fbb9ea 12141 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 12142 * netif_wake_queue()
a2fbb9ea 12143 */
61357325 12144static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
12145{
12146 struct bnx2x *bp = netdev_priv(dev);
54b9ddaa 12147 struct bnx2x_fastpath *fp;
555f6c78 12148 struct netdev_queue *txq;
a2fbb9ea 12149 struct sw_tx_bd *tx_buf;
12150 struct eth_tx_start_bd *tx_start_bd;
12151 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
12152 struct eth_tx_parse_bd *pbd = NULL;
12153 u16 pkt_prod, bd_prod;
755735eb 12154 int nbd, fp_index;
a2fbb9ea 12155 dma_addr_t mapping;
755735eb 12156 u32 xmit_type = bnx2x_xmit_type(bp, skb);
12157 int i;
12158 u8 hlen = 0;
ca00392c 12159 __le16 pkt_size = 0;
12160
12161#ifdef BNX2X_STOP_ON_ERROR
12162 if (unlikely(bp->panic))
12163 return NETDEV_TX_BUSY;
12164#endif
12165
12166 fp_index = skb_get_queue_mapping(skb);
12167 txq = netdev_get_tx_queue(dev, fp_index);
12168
54b9ddaa 12169 fp = &bp->fp[fp_index];
755735eb 12170
231fd58a 12171 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
54b9ddaa 12172 fp->eth_q_stats.driver_xoff++;
555f6c78 12173 netif_tx_stop_queue(txq);
12174 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
12175 return NETDEV_TX_BUSY;
12176 }
12177
12178 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
12179 " gso type %x xmit_type %x\n",
12180 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
12181 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
12182
632da4d6 12183#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
12184 /* First, check if we need to linearize the skb (due to FW
12185 restrictions). No need to check fragmentation if page size > 8K
12186 (there will be no violation of FW restrictions) */
12187 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
12188 /* Statistics of linearization */
12189 bp->lin_cnt++;
12190 if (skb_linearize(skb) != 0) {
12191 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
12192 "silently dropping this SKB\n");
12193 dev_kfree_skb_any(skb);
da5a662a 12194 return NETDEV_TX_OK;
12195 }
12196 }
632da4d6 12197#endif
755735eb 12198
a2fbb9ea 12199 /*
755735eb 12200 Please read carefully. First we use one BD which we mark as start,
ca00392c 12201 then we have a parsing info BD (used for TSO or xsum),
755735eb 12202 and only then we have the rest of the TSO BDs.
12203 (don't forget to mark the last one as last,
12204 and to unmap only AFTER you write to the BD ...)
755735eb 12205 And above all, all PBD sizes are in words - NOT DWORDS!
12206 */
12207
12208 pkt_prod = fp->tx_pkt_prod++;
755735eb 12209 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 12210
755735eb 12211 /* get a tx_buf and first BD */
a2fbb9ea 12212 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
ca00392c 12213 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
a2fbb9ea 12214
12215 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
12216 tx_start_bd->general_data = (UNICAST_ADDRESS <<
12217 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
3196a88a 12218 /* header nbd */
ca00392c 12219 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
a2fbb9ea 12220
12221 /* remember the first BD of the packet */
12222 tx_buf->first_bd = fp->tx_bd_prod;
12223 tx_buf->skb = skb;
ca00392c 12224 tx_buf->flags = 0;
12225
12226 DP(NETIF_MSG_TX_QUEUED,
12227 "sending pkt %u @%p next_idx %u bd %u @%p\n",
ca00392c 12228 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
a2fbb9ea 12229
12230#ifdef BCM_VLAN
12231 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
12232 (bp->flags & HW_VLAN_TX_FLAG)) {
12233 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
12234 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
755735eb 12235 } else
0c6671b0 12236#endif
ca00392c 12237 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 12238
12239 /* turn on parsing and get a BD */
12240 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12241 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
755735eb 12242
ca00392c 12243 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
12244
12245 if (xmit_type & XMIT_CSUM) {
ca00392c 12246 hlen = (skb_network_header(skb) - skb->data) / 2;
12247
12248 /* for now NS flag is not used in Linux */
12249 pbd->global_data =
12250 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
12251 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 12252
12253 pbd->ip_hlen = (skb_transport_header(skb) -
12254 skb_network_header(skb)) / 2;
12255
12256 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 12257
755735eb 12258 pbd->total_hlen = cpu_to_le16(hlen);
ca00392c 12259 hlen = hlen*2;
a2fbb9ea 12260
ca00392c 12261 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
12262
12263 if (xmit_type & XMIT_CSUM_V4)
ca00392c 12264 tx_start_bd->bd_flags.as_bitfield |=
12265 ETH_TX_BD_FLAGS_IP_CSUM;
12266 else
12267 tx_start_bd->bd_flags.as_bitfield |=
12268 ETH_TX_BD_FLAGS_IPV6;
12269
12270 if (xmit_type & XMIT_CSUM_TCP) {
12271 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
12272
12273 } else {
12274 s8 fix = SKB_CS_OFF(skb); /* signed! */
12275
ca00392c 12276 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
a2fbb9ea 12277
755735eb 12278 DP(NETIF_MSG_TX_QUEUED,
12279 "hlen %d fix %d csum before fix %x\n",
12280 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
12281
12282 /* HW bug: fixup the CSUM */
12283 pbd->tcp_pseudo_csum =
12284 bnx2x_csum_fix(skb_transport_header(skb),
12285 SKB_CS(skb), fix);
12286
12287 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
12288 pbd->tcp_pseudo_csum);
12289 }
12290 }
12291
12292 mapping = dma_map_single(&bp->pdev->dev, skb->data,
12293 skb_headlen(skb), DMA_TO_DEVICE);
a2fbb9ea 12294
12295 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12296 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12297 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
12298 tx_start_bd->nbd = cpu_to_le16(nbd);
12299 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
12300 pkt_size = tx_start_bd->nbytes;
12301
12302 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
755735eb 12303 " nbytes %d flags %x vlan %x\n",
12304 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
12305 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
12306 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
a2fbb9ea 12307
755735eb 12308 if (xmit_type & XMIT_GSO) {
12309
12310 DP(NETIF_MSG_TX_QUEUED,
12311 "TSO packet len %d hlen %d total len %d tso size %d\n",
12312 skb->len, hlen, skb_headlen(skb),
12313 skb_shinfo(skb)->gso_size);
12314
ca00392c 12315 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
a2fbb9ea 12316
755735eb 12317 if (unlikely(skb_headlen(skb) > hlen))
12318 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
12319 hlen, bd_prod, ++nbd);
12320
12321 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
12322 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
12323 pbd->tcp_flags = pbd_tcp_flags(skb);
12324
12325 if (xmit_type & XMIT_GSO_V4) {
12326 pbd->ip_id = swab16(ip_hdr(skb)->id);
12327 pbd->tcp_pseudo_csum =
12328 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
12329 ip_hdr(skb)->daddr,
12330 0, IPPROTO_TCP, 0));
12331
12332 } else
12333 pbd->tcp_pseudo_csum =
12334 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
12335 &ipv6_hdr(skb)->daddr,
12336 0, IPPROTO_TCP, 0));
12337
12338 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
12339 }
ca00392c 12340 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
a2fbb9ea 12341
12342 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
12343 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 12344
755735eb 12345 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12346 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12347 if (total_pkt_bd == NULL)
12348 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
a2fbb9ea 12349
12350 mapping = dma_map_page(&bp->pdev->dev, frag->page,
12351 frag->page_offset,
12352 frag->size, DMA_TO_DEVICE);
a2fbb9ea 12353
12354 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12355 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12356 tx_data_bd->nbytes = cpu_to_le16(frag->size);
12357 le16_add_cpu(&pkt_size, frag->size);
a2fbb9ea 12358
755735eb 12359 DP(NETIF_MSG_TX_QUEUED,
12360 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
12361 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
12362 le16_to_cpu(tx_data_bd->nbytes));
12363 }
12364
ca00392c 12365 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
a2fbb9ea 12366
12367 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12368
755735eb 12369 /* now send a tx doorbell, counting the next BD
12370 * if the packet contains or ends with it
12371 */
12372 if (TX_BD_POFF(bd_prod) < nbd)
12373 nbd++;
12374
12375 if (total_pkt_bd != NULL)
12376 total_pkt_bd->total_pkt_bytes = pkt_size;
12377
12378 if (pbd)
12379 DP(NETIF_MSG_TX_QUEUED,
12380 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
12381 " tcp_flags %x xsum %x seq %u hlen %u\n",
12382 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
12383 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 12384 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 12385
755735eb 12386 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 12387
12388 /*
12389 * Make sure that the BD data is updated before updating the producer
12390 * since FW might read the BD right after the producer is updated.
12391 * This is only applicable for weak-ordered memory model archs such
12392 * as IA-64. The following barrier is also mandatory since FW
12393 * assumes packets must have BDs.
12394 */
12395 wmb();
12396
12397 fp->tx_db.data.prod += nbd;
12398 barrier();
54b9ddaa 12399 DOORBELL(bp, fp->index, fp->tx_db.raw);
12400
12401 mmiowb();
12402
755735eb 12403 fp->tx_bd_prod += nbd;
12404
12405 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
ca00392c 12406 netif_tx_stop_queue(txq);
12407
12408 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
12409 * ordering of set_bit() in netif_tx_stop_queue() and read of
12410 * fp->tx_bd_cons */
58f4c4cf 12411 smp_mb();
9baddeb8 12412
54b9ddaa 12413 fp->eth_q_stats.driver_xoff++;
a2fbb9ea 12414 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
555f6c78 12415 netif_tx_wake_queue(txq);
a2fbb9ea 12416 }
54b9ddaa 12417 fp->tx_pkt++;
12418
12419 return NETDEV_TX_OK;
12420}
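/* BD accounting in bnx2x_start_xmit(), summarized from the code above:
 * nbd starts at nr_frags + 2 (start BD + parsing BD), bnx2x_tx_split()
 * adds one more when the TSO headers are carved off into their own BD,
 * and the TX_BD_POFF() test adds one for the "next page" BD when the
 * chain wraps a BD page, so the doorbell value counts every descriptor
 * the FW will actually fetch.
 */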
12421
bb2a0f7a 12422/* called with rtnl_lock */
12423static int bnx2x_open(struct net_device *dev)
12424{
12425 struct bnx2x *bp = netdev_priv(dev);
12426
12427 netif_carrier_off(dev);
12428
12429 bnx2x_set_power_state(bp, PCI_D0);
12430
12431 if (!bnx2x_reset_is_done(bp)) {
12432 do {
12433 /* Reset the MCP mailbox sequence if there is an ongoing
12434 * recovery
12435 */
12436 bp->fw_seq = 0;
12437
12438 /* If this is the first function to load while "reset done"
12439 * is still not cleared, a recovery may be pending. We don't
12440 * check the attention state here because it may have
12441 * already been cleared by a "common" reset, but we
12442 * shall proceed with "process kill" anyway.
12443 */
12444 if ((bnx2x_get_load_cnt(bp) == 0) &&
12445 bnx2x_trylock_hw_lock(bp,
12446 HW_LOCK_RESOURCE_RESERVED_08) &&
12447 (!bnx2x_leader_reset(bp))) {
12448 DP(NETIF_MSG_HW, "Recovered in open\n");
12449 break;
12450 }
12451
12452 bnx2x_set_power_state(bp, PCI_D3hot);
12453
12454 printk(KERN_ERR "%s: Recovery flow hasn't been properly"
12455 " completed yet. Try again later. If you still see this"
12456 " message after a few retries then a power cycle is"
12457 " required.\n", bp->dev->name);
12458
12459 return -EAGAIN;
12460 } while (0);
12461 }
12462
12463 bp->recovery_state = BNX2X_RECOVERY_DONE;
12464
bb2a0f7a 12465 return bnx2x_nic_load(bp, LOAD_OPEN);
12466}
12467
bb2a0f7a 12468/* called with rtnl_lock */
12469static int bnx2x_close(struct net_device *dev)
12470{
12471 struct bnx2x *bp = netdev_priv(dev);
12472
12473 /* Unload the driver, release IRQs */
12474 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
12475 if (atomic_read(&bp->pdev->enable_cnt) == 1)
12476 if (!CHIP_REV_IS_SLOW(bp))
12477 bnx2x_set_power_state(bp, PCI_D3hot);
12478
12479 return 0;
12480}
12481
f5372251 12482/* called with netif_tx_lock from dev_mcast.c */
12483static void bnx2x_set_rx_mode(struct net_device *dev)
12484{
12485 struct bnx2x *bp = netdev_priv(dev);
12486 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
12487 int port = BP_PORT(bp);
12488
12489 if (bp->state != BNX2X_STATE_OPEN) {
12490 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
12491 return;
12492 }
12493
12494 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
12495
12496 if (dev->flags & IFF_PROMISC)
12497 rx_mode = BNX2X_RX_MODE_PROMISC;
12498
12499 else if ((dev->flags & IFF_ALLMULTI) ||
12500 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
12501 CHIP_IS_E1(bp)))
12502 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12503
12504 else { /* some multicasts */
12505 if (CHIP_IS_E1(bp)) {
12506 int i, old, offset;
22bedad3 12507 struct netdev_hw_addr *ha;
12508 struct mac_configuration_cmd *config =
12509 bnx2x_sp(bp, mcast_config);
12510
0ddf477b 12511 i = 0;
22bedad3 12512 netdev_for_each_mc_addr(ha, dev) {
12513 config->config_table[i].
12514 cam_entry.msb_mac_addr =
22bedad3 12515 swab16(*(u16 *)&ha->addr[0]);
12516 config->config_table[i].
12517 cam_entry.middle_mac_addr =
22bedad3 12518 swab16(*(u16 *)&ha->addr[2]);
12519 config->config_table[i].
12520 cam_entry.lsb_mac_addr =
22bedad3 12521 swab16(*(u16 *)&ha->addr[4]);
12522 config->config_table[i].cam_entry.flags =
12523 cpu_to_le16(port);
12524 config->config_table[i].
12525 target_table_entry.flags = 0;
12526 config->config_table[i].target_table_entry.
12527 clients_bit_vector =
12528 cpu_to_le32(1 << BP_L_ID(bp));
12529 config->config_table[i].
12530 target_table_entry.vlan_id = 0;
12531
12532 DP(NETIF_MSG_IFUP,
12533 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
12534 config->config_table[i].
12535 cam_entry.msb_mac_addr,
12536 config->config_table[i].
12537 cam_entry.middle_mac_addr,
12538 config->config_table[i].
12539 cam_entry.lsb_mac_addr);
0ddf477b 12540 i++;
34f80b04 12541 }
8d9c5f34 12542 old = config->hdr.length;
12543 if (old > i) {
12544 for (; i < old; i++) {
12545 if (CAM_IS_INVALID(config->
12546 config_table[i])) {
af246401 12547 /* already invalidated */
12548 break;
12549 }
12550 /* invalidate */
12551 CAM_INVALIDATE(config->
12552 config_table[i]);
12553 }
12554 }
12555
12556 if (CHIP_REV_IS_SLOW(bp))
12557 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
12558 else
12559 offset = BNX2X_MAX_MULTICAST*(1 + port);
12560
8d9c5f34 12561 config->hdr.length = i;
34f80b04 12562 config->hdr.offset = offset;
8d9c5f34 12563 config->hdr.client_id = bp->fp->cl_id;
12564 config->hdr.reserved1 = 0;
12565
12566 bp->set_mac_pending++;
12567 smp_wmb();
12568
12569 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
12570 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
12571 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
12572 0);
12573 } else { /* E1H */
12574 /* Accept one or more multicasts */
22bedad3 12575 struct netdev_hw_addr *ha;
12576 u32 mc_filter[MC_HASH_SIZE];
12577 u32 crc, bit, regidx;
12578 int i;
12579
12580 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
12581
22bedad3 12582 netdev_for_each_mc_addr(ha, dev) {
7c510e4b 12583 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
22bedad3 12584 ha->addr);
34f80b04 12585
22bedad3 12586 crc = crc32c_le(0, ha->addr, ETH_ALEN);
12587 bit = (crc >> 24) & 0xff;
12588 regidx = bit >> 5;
12589 bit &= 0x1f;
12590 mc_filter[regidx] |= (1 << bit);
12591 }
12592
12593 for (i = 0; i < MC_HASH_SIZE; i++)
12594 REG_WR(bp, MC_HASH_OFFSET(bp, i),
12595 mc_filter[i]);
12596 }
12597 }
12598
12599 bp->rx_mode = rx_mode;
12600 bnx2x_set_storm_rx_mode(bp);
12601}
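/* E1H multicast hashing, worked through (hypothetical MAC): for
 * 01:00:5e:00:00:01 the loop above computes crc = crc32c_le(0, addr, 6),
 * takes bit = (crc >> 24) & 0xff as an index into a 256-bit filter, and
 * sets bit (bit & 0x1f) of MC_HASH register mc_filter[bit >> 5]. Any
 * multicast frame whose CRC maps to a set bit is accepted, with the
 * false positives inherent to hash filtering.
 */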
12602
12603/* called with rtnl_lock */
12604static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
12605{
12606 struct sockaddr *addr = p;
12607 struct bnx2x *bp = netdev_priv(dev);
12608
34f80b04 12609 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
12610 return -EINVAL;
12611
12612 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
12613 if (netif_running(dev)) {
12614 if (CHIP_IS_E1(bp))
e665bfda 12615 bnx2x_set_eth_mac_addr_e1(bp, 1);
34f80b04 12616 else
e665bfda 12617 bnx2x_set_eth_mac_addr_e1h(bp, 1);
34f80b04 12618 }
12619
12620 return 0;
12621}
12622
c18487ee 12623/* called with rtnl_lock */
12624static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
12625 int devad, u16 addr)
a2fbb9ea 12626{
12627 struct bnx2x *bp = netdev_priv(netdev);
12628 u16 value;
12629 int rc;
12630 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 12631
12632 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
12633 prtad, devad, addr);
a2fbb9ea 12634
12635 if (prtad != bp->mdio.prtad) {
12636 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
12637 prtad, bp->mdio.prtad);
12638 return -EINVAL;
12639 }
12640
12641 /* The HW expects different devad if CL22 is used */
12642 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
c18487ee 12643
12644 bnx2x_acquire_phy_lock(bp);
12645 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
12646 devad, addr, &value);
12647 bnx2x_release_phy_lock(bp);
12648 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
a2fbb9ea 12649
12650 if (!rc)
12651 rc = value;
12652 return rc;
12653}
a2fbb9ea 12654
12655/* called with rtnl_lock */
12656static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
12657 u16 addr, u16 value)
12658{
12659 struct bnx2x *bp = netdev_priv(netdev);
12660 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
12661 int rc;
12662
12663 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
12664 " value 0x%x\n", prtad, devad, addr, value);
12665
12666 if (prtad != bp->mdio.prtad) {
12667 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
12668 prtad, bp->mdio.prtad);
12669 return -EINVAL;
12670 }
12671
12672 /* The HW expects different devad if CL22 is used */
12673 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
a2fbb9ea 12674
12675 bnx2x_acquire_phy_lock(bp);
12676 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
12677 devad, addr, value);
12678 bnx2x_release_phy_lock(bp);
12679 return rc;
12680}
c18487ee 12681
12682/* called with rtnl_lock */
12683static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12684{
12685 struct bnx2x *bp = netdev_priv(dev);
12686 struct mii_ioctl_data *mdio = if_mii(ifr);
a2fbb9ea 12687
12688 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
12689 mdio->phy_id, mdio->reg_num, mdio->val_in);
a2fbb9ea 12690
12691 if (!netif_running(dev))
12692 return -EAGAIN;
12693
12694 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
12695}
12696
34f80b04 12697/* called with rtnl_lock */
12698static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
12699{
12700 struct bnx2x *bp = netdev_priv(dev);
34f80b04 12701 int rc = 0;
a2fbb9ea 12702
12703 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
12704 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
12705 return -EAGAIN;
12706 }
12707
12708 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
12709 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
12710 return -EINVAL;
12711
12712 /* This does not race with packet allocation
c14423fe 12713 * because the actual alloc size is
12714 * only updated as part of load
12715 */
12716 dev->mtu = new_mtu;
12717
12718 if (netif_running(dev)) {
12719 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
12720 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 12721 }
12722
12723 return rc;
12724}
12725
12726static void bnx2x_tx_timeout(struct net_device *dev)
12727{
12728 struct bnx2x *bp = netdev_priv(dev);
12729
12730#ifdef BNX2X_STOP_ON_ERROR
12731 if (!bp->panic)
12732 bnx2x_panic();
12733#endif
12734 /* This allows the netif to be shut down gracefully before resetting */
72fd0718 12735 schedule_delayed_work(&bp->reset_task, 0);
12736}
12737
12738#ifdef BCM_VLAN
34f80b04 12739/* called with rtnl_lock */
12740static void bnx2x_vlan_rx_register(struct net_device *dev,
12741 struct vlan_group *vlgrp)
12742{
12743 struct bnx2x *bp = netdev_priv(dev);
12744
12745 bp->vlgrp = vlgrp;
12746
12747 /* Set flags according to the required capabilities */
12748 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
12749
12750 if (dev->features & NETIF_F_HW_VLAN_TX)
12751 bp->flags |= HW_VLAN_TX_FLAG;
12752
12753 if (dev->features & NETIF_F_HW_VLAN_RX)
12754 bp->flags |= HW_VLAN_RX_FLAG;
12755
a2fbb9ea 12756 if (netif_running(dev))
49d66772 12757 bnx2x_set_client_config(bp);
a2fbb9ea 12758}
34f80b04 12759
12760#endif
12761
257ddbda 12762#ifdef CONFIG_NET_POLL_CONTROLLER
12763static void poll_bnx2x(struct net_device *dev)
12764{
12765 struct bnx2x *bp = netdev_priv(dev);
12766
12767 disable_irq(bp->pdev->irq);
12768 bnx2x_interrupt(bp->pdev->irq, dev);
12769 enable_irq(bp->pdev->irq);
12770}
12771#endif
12772
12773static const struct net_device_ops bnx2x_netdev_ops = {
12774 .ndo_open = bnx2x_open,
12775 .ndo_stop = bnx2x_close,
12776 .ndo_start_xmit = bnx2x_start_xmit,
356e2385 12777 .ndo_set_multicast_list = bnx2x_set_rx_mode,
12778 .ndo_set_mac_address = bnx2x_change_mac_addr,
12779 .ndo_validate_addr = eth_validate_addr,
12780 .ndo_do_ioctl = bnx2x_ioctl,
12781 .ndo_change_mtu = bnx2x_change_mtu,
12782 .ndo_tx_timeout = bnx2x_tx_timeout,
12783#ifdef BCM_VLAN
12784 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
12785#endif
257ddbda 12786#ifdef CONFIG_NET_POLL_CONTROLLER
12787 .ndo_poll_controller = poll_bnx2x,
12788#endif
12789};
12790
12791static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
12792 struct net_device *dev)
12793{
12794 struct bnx2x *bp;
12795 int rc;
12796
12797 SET_NETDEV_DEV(dev, &pdev->dev);
12798 bp = netdev_priv(dev);
12799
12800 bp->dev = dev;
12801 bp->pdev = pdev;
a2fbb9ea 12802 bp->flags = 0;
34f80b04 12803 bp->func = PCI_FUNC(pdev->devfn);
12804
12805 rc = pci_enable_device(pdev);
12806 if (rc) {
7995c64e 12807 pr_err("Cannot enable PCI device, aborting\n");
12808 goto err_out;
12809 }
12810
12811 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7995c64e 12812 pr_err("Cannot find PCI device base address, aborting\n");
12813 rc = -ENODEV;
12814 goto err_out_disable;
12815 }
12816
12817 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
7995c64e 12818 pr_err("Cannot find second PCI device base address, aborting\n");
12819 rc = -ENODEV;
12820 goto err_out_disable;
12821 }
12822
12823 if (atomic_read(&pdev->enable_cnt) == 1) {
12824 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12825 if (rc) {
7995c64e 12826 pr_err("Cannot obtain PCI resources, aborting\n");
12827 goto err_out_disable;
12828 }
a2fbb9ea 12829
12830 pci_set_master(pdev);
12831 pci_save_state(pdev);
12832 }
12833
12834 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12835 if (bp->pm_cap == 0) {
7995c64e 12836 pr_err("Cannot find power management capability, aborting\n");
12837 rc = -EIO;
12838 goto err_out_release;
12839 }
12840
12841 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
12842 if (bp->pcie_cap == 0) {
7995c64e 12843 pr_err("Cannot find PCI Express capability, aborting\n");
12844 rc = -EIO;
12845 goto err_out_release;
12846 }
12847
1a983142 12848 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
a2fbb9ea 12849 bp->flags |= USING_DAC_FLAG;
12850 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
12851 pr_err("dma_set_coherent_mask failed, aborting\n");
12852 rc = -EIO;
12853 goto err_out_release;
12854 }
12855
1a983142 12856 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
7995c64e 12857 pr_err("System does not support DMA, aborting\n");
12858 rc = -EIO;
12859 goto err_out_release;
12860 }
12861
12862 dev->mem_start = pci_resource_start(pdev, 0);
12863 dev->base_addr = dev->mem_start;
12864 dev->mem_end = pci_resource_end(pdev, 0);
12865
12866 dev->irq = pdev->irq;
12867
275f165f 12868 bp->regview = pci_ioremap_bar(pdev, 0);
a2fbb9ea 12869 if (!bp->regview) {
7995c64e 12870 pr_err("Cannot map register space, aborting\n");
12871 rc = -ENOMEM;
12872 goto err_out_release;
12873 }
12874
12875 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
12876 min_t(u64, BNX2X_DB_SIZE,
12877 pci_resource_len(pdev, 2)));
a2fbb9ea 12878 if (!bp->doorbells) {
7995c64e 12879 pr_err("Cannot map doorbell space, aborting\n");
12880 rc = -ENOMEM;
12881 goto err_out_unmap;
12882 }
12883
12884 bnx2x_set_power_state(bp, PCI_D0);
12885
12886 /* clean indirect addresses */
12887 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
12888 PCICFG_VENDOR_ID_OFFSET);
12889 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
12890 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
12891 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
12892 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 12893
12894 /* Reset the load counter */
12895 bnx2x_clear_load_cnt(bp);
12896
34f80b04 12897 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 12898
c64213cd 12899 dev->netdev_ops = &bnx2x_netdev_ops;
34f80b04 12900 dev->ethtool_ops = &bnx2x_ethtool_ops;
12901 dev->features |= NETIF_F_SG;
12902 dev->features |= NETIF_F_HW_CSUM;
12903 if (bp->flags & USING_DAC_FLAG)
12904 dev->features |= NETIF_F_HIGHDMA;
12905 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
12906 dev->features |= NETIF_F_TSO6;
12907#ifdef BCM_VLAN
12908 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
0c6671b0 12909 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
12910
12911 dev->vlan_features |= NETIF_F_SG;
12912 dev->vlan_features |= NETIF_F_HW_CSUM;
12913 if (bp->flags & USING_DAC_FLAG)
12914 dev->vlan_features |= NETIF_F_HIGHDMA;
12915 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
12916 dev->vlan_features |= NETIF_F_TSO6;
34f80b04 12917#endif
a2fbb9ea 12918
12919 /* get_port_hwinfo() will set prtad and mmds properly */
12920 bp->mdio.prtad = MDIO_PRTAD_NONE;
12921 bp->mdio.mmds = 0;
12922 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
12923 bp->mdio.dev = dev;
12924 bp->mdio.mdio_read = bnx2x_mdio_read;
12925 bp->mdio.mdio_write = bnx2x_mdio_write;
12926
12927 return 0;
12928
12929err_out_unmap:
12930 if (bp->regview) {
12931 iounmap(bp->regview);
12932 bp->regview = NULL;
12933 }
12934 if (bp->doorbells) {
12935 iounmap(bp->doorbells);
12936 bp->doorbells = NULL;
12937 }
12938
12939err_out_release:
12940 if (atomic_read(&pdev->enable_cnt) == 1)
12941 pci_release_regions(pdev);
12942
12943err_out_disable:
12944 pci_disable_device(pdev);
12945 pci_set_drvdata(pdev, NULL);
12946
12947err_out:
12948 return rc;
12949}
12950
12951static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
12952 int *width, int *speed)
12953{
12954 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
12955
37f9ce62 12956 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
25047950 12957
12958 /* return value of 1=2.5GHz 2=5GHz */
12959 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
25047950 12960}
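/* Decoding example (hypothetical register value): if the link-control
 * word reads back with a width field of 8 and a speed field of 1, the
 * probe banner printed by bnx2x_init_one() below reports
 * "PCI-E x8 2.5GHz"; a speed field of 2 reports "5GHz (Gen2)".
 */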
37f9ce62 12961
12962static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
12963{
37f9ce62 12964 const struct firmware *firmware = bp->firmware;
12965 struct bnx2x_fw_file_hdr *fw_hdr;
12966 struct bnx2x_fw_file_section *sections;
94a78b79 12967 u32 offset, len, num_ops;
37f9ce62 12968 u16 *ops_offsets;
94a78b79 12969 int i;
37f9ce62 12970 const u8 *fw_ver;
12971
12972 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
12973 return -EINVAL;
12974
12975 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
12976 sections = (struct bnx2x_fw_file_section *)fw_hdr;
12977
12978 /* Make sure none of the offsets and sizes make us read beyond
12979 * the end of the firmware data */
12980 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
12981 offset = be32_to_cpu(sections[i].offset);
12982 len = be32_to_cpu(sections[i].len);
12983 if (offset + len > firmware->size) {
7995c64e 12984 pr_err("Section %d length is out of bounds\n", i);
12985 return -EINVAL;
12986 }
12987 }
12988
12989 /* Likewise for the init_ops offsets */
12990 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
12991 ops_offsets = (u16 *)(firmware->data + offset);
12992 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
12993
12994 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
12995 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
7995c64e 12996 pr_err("Section offset %d is out of bounds\n", i);
12997 return -EINVAL;
12998 }
12999 }
13000
13001 /* Check FW version */
13002 offset = be32_to_cpu(fw_hdr->fw_version.offset);
13003 fw_ver = firmware->data + offset;
13004 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
13005 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
13006 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
13007 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
7995c64e 13008 pr_err("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
13009 fw_ver[0], fw_ver[1], fw_ver[2],
13010 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
13011 BCM_5710_FW_MINOR_VERSION,
13012 BCM_5710_FW_REVISION_VERSION,
13013 BCM_5710_FW_ENGINEERING_VERSION);
ab6ad5a4 13014 return -EINVAL;
13015 }
13016
13017 return 0;
13018}
13019
ab6ad5a4 13020static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 13021{
13022 const __be32 *source = (const __be32 *)_source;
13023 u32 *target = (u32 *)_target;
94a78b79 13024 u32 i;
13025
13026 for (i = 0; i < n/4; i++)
13027 target[i] = be32_to_cpu(source[i]);
13028}
13029
13030/*
13031 Ops array is stored in the following format:
13032 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
13033 */
ab6ad5a4 13034static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
94a78b79 13035{
13036 const __be32 *source = (const __be32 *)_source;
13037 struct raw_op *target = (struct raw_op *)_target;
94a78b79 13038 u32 i, j, tmp;
94a78b79 13039
ab6ad5a4 13040 for (i = 0, j = 0; i < n/8; i++, j += 2) {
13041 tmp = be32_to_cpu(source[j]);
13042 target[i].op = (tmp >> 24) & 0xff;
13043 target[i].offset = tmp & 0xffffff;
13044 target[i].raw_data = be32_to_cpu(source[j+1]);
13045 }
13046}
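/* Layout example for one 8-byte op (hypothetical big-endian words):
 * the pair { 0x02000104, 0x0000dead } unpacks above to
 *
 *	op       = 0x02
 *	offset   = 0x000104
 *	raw_data = 0x0000dead
 *
 * i.e. the opcode rides in the top byte of the first word and the
 * 24-bit offset in its low bytes, as the format comment describes.
 */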
13047
13048static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 13049{
13050 const __be16 *source = (const __be16 *)_source;
13051 u16 *target = (u16 *)_target;
94a78b79 13052 u32 i;
13053
13054 for (i = 0; i < n/2; i++)
13055 target[i] = be16_to_cpu(source[i]);
13056}
13057
13058#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
13059do { \
13060 u32 len = be32_to_cpu(fw_hdr->arr.len); \
13061 bp->arr = kmalloc(len, GFP_KERNEL); \
13062 if (!bp->arr) { \
13063 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
13064 goto lbl; \
13065 } \
13066 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
13067 (u8 *)bp->arr, len); \
13068} while (0)
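/* Expansion sketch: BNX2X_ALLOC_AND_SET(init_data,
 * request_firmware_exit, be32_to_cpu_n) allocates
 * be32_to_cpu(fw_hdr->init_data.len) bytes into bp->init_data,
 * byte-swaps that section of the firmware image into the buffer, and
 * jumps to request_firmware_exit on allocation failure. Note that
 * 'arr' is pasted both as the bp-> member name and as the fw_hdr
 * section name, so the two must match.
 */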
94a78b79 13069
13070static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
13071{
45229b42 13072 const char *fw_file_name;
94a78b79 13073 struct bnx2x_fw_file_hdr *fw_hdr;
45229b42 13074 int rc;
94a78b79 13075
94a78b79 13076 if (CHIP_IS_E1(bp))
45229b42 13077 fw_file_name = FW_FILE_NAME_E1;
94a78b79 13078 else
45229b42 13079 fw_file_name = FW_FILE_NAME_E1H;
94a78b79 13080
7995c64e 13081 pr_info("Loading %s\n", fw_file_name);
13082
13083 rc = request_firmware(&bp->firmware, fw_file_name, dev);
13084 if (rc) {
7995c64e 13085 pr_err("Can't load firmware file %s\n", fw_file_name);
13086 goto request_firmware_exit;
13087 }
13088
13089 rc = bnx2x_check_firmware(bp);
13090 if (rc) {
7995c64e 13091 pr_err("Corrupt firmware file %s\n", fw_file_name);
13092 goto request_firmware_exit;
13093 }
13094
13095 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
13096
13097 /* Initialize the pointers to the init arrays */
13098 /* Blob */
13099 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
13100
13101 /* Opcodes */
13102 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
13103
13104 /* Offsets */
13105 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
13106 be16_to_cpu_n);
13107
13108 /* STORMs firmware */
13109 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13110 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
13111 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
13112 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
13113 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13114 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
13115 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
13116 be32_to_cpu(fw_hdr->usem_pram_data.offset);
13117 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13118 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
13119 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
13120 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
13121 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13122 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
13123 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
13124 be32_to_cpu(fw_hdr->csem_pram_data.offset);
13125
13126 return 0;
ab6ad5a4 13127
13128init_offsets_alloc_err:
13129 kfree(bp->init_ops);
13130init_ops_alloc_err:
13131 kfree(bp->init_data);
13132request_firmware_exit:
13133 release_firmware(bp->firmware);
13134
13135 return rc;
13136}
13137
13138
13139static int __devinit bnx2x_init_one(struct pci_dev *pdev,
13140 const struct pci_device_id *ent)
13141{
13142 struct net_device *dev = NULL;
13143 struct bnx2x *bp;
37f9ce62 13144 int pcie_width, pcie_speed;
25047950 13145 int rc;
a2fbb9ea 13146
a2fbb9ea 13147 /* dev zeroed in init_etherdev */
555f6c78 13148 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
34f80b04 13149 if (!dev) {
7995c64e 13150 pr_err("Cannot allocate net device\n");
a2fbb9ea 13151 return -ENOMEM;
34f80b04 13152 }
a2fbb9ea 13153
a2fbb9ea 13154 bp = netdev_priv(dev);
7995c64e 13155 bp->msg_enable = debug;
a2fbb9ea 13156
13157 pci_set_drvdata(pdev, dev);
13158
34f80b04 13159 rc = bnx2x_init_dev(pdev, dev);
13160 if (rc < 0) {
13161 free_netdev(dev);
13162 return rc;
13163 }
13164
34f80b04 13165 rc = bnx2x_init_bp(bp);
13166 if (rc)
13167 goto init_one_exit;
13168
13169 /* Set init arrays */
13170 rc = bnx2x_init_firmware(bp, &pdev->dev);
13171 if (rc) {
7995c64e 13172 pr_err("Error loading firmware\n");
13173 goto init_one_exit;
13174 }
13175
693fc0d1 13176 rc = register_netdev(dev);
34f80b04 13177 if (rc) {
693fc0d1 13178 dev_err(&pdev->dev, "Cannot register net device\n");
13179 goto init_one_exit;
13180 }
13181
37f9ce62 13182 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
13183 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
13184 board_info[ent->driver_data].name,
13185 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
13186 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
13187 dev->base_addr, bp->pdev->irq, dev->dev_addr);
c016201c 13188
a2fbb9ea 13189 return 0;
13190
13191init_one_exit:
13192 if (bp->regview)
13193 iounmap(bp->regview);
13194
13195 if (bp->doorbells)
13196 iounmap(bp->doorbells);
13197
13198 free_netdev(dev);
13199
13200 if (atomic_read(&pdev->enable_cnt) == 1)
13201 pci_release_regions(pdev);
13202
13203 pci_disable_device(pdev);
13204 pci_set_drvdata(pdev, NULL);
13205
13206 return rc;
13207}
13208
13209static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
13210{
13211 struct net_device *dev = pci_get_drvdata(pdev);
13212 struct bnx2x *bp;
13213
13214 if (!dev) {
7995c64e 13215 pr_err("BAD net device from bnx2x_init_one\n");
13216 return;
13217 }
228241eb 13218 bp = netdev_priv(dev);
a2fbb9ea 13219
13220 unregister_netdev(dev);
13221
13222 /* Make sure RESET task is not scheduled before continuing */
13223 cancel_delayed_work_sync(&bp->reset_task);
13224
13225 kfree(bp->init_ops_offsets);
13226 kfree(bp->init_ops);
13227 kfree(bp->init_data);
13228 release_firmware(bp->firmware);
13229
13230 if (bp->regview)
13231 iounmap(bp->regview);
13232
13233 if (bp->doorbells)
13234 iounmap(bp->doorbells);
13235
13236 free_netdev(dev);
13237
13238 if (atomic_read(&pdev->enable_cnt) == 1)
13239 pci_release_regions(pdev);
13240
13241 pci_disable_device(pdev);
13242 pci_set_drvdata(pdev, NULL);
13243}
13244
13245static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
13246{
13247 struct net_device *dev = pci_get_drvdata(pdev);
13248 struct bnx2x *bp;
13249
34f80b04 13250 if (!dev) {
7995c64e 13251 pr_err("BAD net device from bnx2x_init_one\n");
13252 return -ENODEV;
13253 }
13254 bp = netdev_priv(dev);
a2fbb9ea 13255
34f80b04 13256 rtnl_lock();
a2fbb9ea 13257
34f80b04 13258 pci_save_state(pdev);
228241eb 13259
13260 if (!netif_running(dev)) {
13261 rtnl_unlock();
13262 return 0;
13263 }
13264
13265 netif_device_detach(dev);
a2fbb9ea 13266
da5a662a 13267 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
34f80b04 13268
a2fbb9ea 13269 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
228241eb 13270
13271 rtnl_unlock();
13272
13273 return 0;
13274}
13275
13276static int bnx2x_resume(struct pci_dev *pdev)
13277{
13278 struct net_device *dev = pci_get_drvdata(pdev);
228241eb 13279 struct bnx2x *bp;
13280 int rc;
13281
228241eb 13282 if (!dev) {
7995c64e 13283 pr_err("BAD net device from bnx2x_init_one\n");
13284 return -ENODEV;
13285 }
228241eb 13286 bp = netdev_priv(dev);
a2fbb9ea 13287
13288 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13289 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
13290 return -EAGAIN;
13291 }
13292
13293 rtnl_lock();
13294
228241eb 13295 pci_restore_state(pdev);
13296
13297 if (!netif_running(dev)) {
13298 rtnl_unlock();
13299 return 0;
13300 }
13301
13302 bnx2x_set_power_state(bp, PCI_D0);
13303 netif_device_attach(dev);
13304
da5a662a 13305 rc = bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea 13306
13307 rtnl_unlock();
13308
13309 return rc;
13310}
13311
13312static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
13313{
13314 int i;
13315
13316 bp->state = BNX2X_STATE_ERROR;
13317
13318 bp->rx_mode = BNX2X_RX_MODE_NONE;
13319
13320 bnx2x_netif_stop(bp, 0);
13321
13322 del_timer_sync(&bp->timer);
13323 bp->stats_state = STATS_STATE_DISABLED;
13324 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
13325
13326 /* Release IRQs */
6cbe5065 13327 bnx2x_free_irq(bp, false);
13328
13329 if (CHIP_IS_E1(bp)) {
13330 struct mac_configuration_cmd *config =
13331 bnx2x_sp(bp, mcast_config);
13332
8d9c5f34 13333 for (i = 0; i < config->hdr.length; i++)
13334 CAM_INVALIDATE(config->config_table[i]);
13335 }
13336
13337 /* Free SKBs, SGEs, TPA pool and driver internals */
13338 bnx2x_free_skbs(bp);
54b9ddaa 13339 for_each_queue(bp, i)
f8ef6e44 13340 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
54b9ddaa 13341 for_each_queue(bp, i)
7cde1c8b 13342 netif_napi_del(&bnx2x_fp(bp, i, napi));
13343 bnx2x_free_mem(bp);
13344
13345 bp->state = BNX2X_STATE_CLOSED;
13346
13347 netif_carrier_off(bp->dev);
13348
13349 return 0;
13350}
13351
13352static void bnx2x_eeh_recover(struct bnx2x *bp)
13353{
13354 u32 val;
13355
13356 mutex_init(&bp->port.phy_mutex);
13357
13358 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
13359 bp->link_params.shmem_base = bp->common.shmem_base;
13360 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
13361
13362 if (!bp->common.shmem_base ||
13363 (bp->common.shmem_base < 0xA0000) ||
13364 (bp->common.shmem_base >= 0xC0000)) {
13365 BNX2X_DEV_INFO("MCP not active\n");
13366 bp->flags |= NO_MCP_FLAG;
13367 return;
13368 }
13369
13370 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
13371 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
13372 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
13373 BNX2X_ERR("BAD MCP validity signature\n");
13374
13375 if (!BP_NOMCP(bp)) {
13376 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
13377 & DRV_MSG_SEQ_NUMBER_MASK);
13378 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
13379 }
13380}
13381
13382/**
13383 * bnx2x_io_error_detected - called when PCI error is detected
13384 * @pdev: Pointer to PCI device
13385 * @state: The current pci connection state
13386 *
13387 * This function is called after a PCI bus error affecting
13388 * this device has been detected.
13389 */
13390static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
13391 pci_channel_state_t state)
13392{
13393 struct net_device *dev = pci_get_drvdata(pdev);
13394 struct bnx2x *bp = netdev_priv(dev);
13395
13396 rtnl_lock();
13397
13398 netif_device_detach(dev);
13399
13400 if (state == pci_channel_io_perm_failure) {
13401 rtnl_unlock();
13402 return PCI_ERS_RESULT_DISCONNECT;
13403 }
13404
493adb1f 13405 if (netif_running(dev))
f8ef6e44 13406 bnx2x_eeh_nic_unload(bp);
13407
13408 pci_disable_device(pdev);
13409
13410 rtnl_unlock();
13411
13412 /* Request a slot reset */
13413 return PCI_ERS_RESULT_NEED_RESET;
13414}
13415
13416/**
13417 * bnx2x_io_slot_reset - called after the PCI bus has been reset
13418 * @pdev: Pointer to PCI device
13419 *
13420 * Restart the card from scratch, as if from a cold-boot.
13421 */
13422static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
13423{
13424 struct net_device *dev = pci_get_drvdata(pdev);
13425 struct bnx2x *bp = netdev_priv(dev);
13426
13427 rtnl_lock();
13428
13429 if (pci_enable_device(pdev)) {
13430 dev_err(&pdev->dev,
13431 "Cannot re-enable PCI device after reset\n");
13432 rtnl_unlock();
13433 return PCI_ERS_RESULT_DISCONNECT;
13434 }
13435
13436 pci_set_master(pdev);
13437 pci_restore_state(pdev);
13438
13439 if (netif_running(dev))
13440 bnx2x_set_power_state(bp, PCI_D0);
13441
13442 rtnl_unlock();
13443
13444 return PCI_ERS_RESULT_RECOVERED;
13445}
13446
13447/**
13448 * bnx2x_io_resume - called when traffic can start flowing again
13449 * @pdev: Pointer to PCI device
13450 *
13451 * This callback is called when the error recovery driver tells us that
13452 * it's OK to resume normal operation.
13453 */
13454static void bnx2x_io_resume(struct pci_dev *pdev)
13455{
13456 struct net_device *dev = pci_get_drvdata(pdev);
13457 struct bnx2x *bp = netdev_priv(dev);
13458
13459 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13460 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
13461 return;
13462 }
13463
13464 rtnl_lock();
13465
13466 bnx2x_eeh_recover(bp);
13467
493adb1f 13468 if (netif_running(dev))
f8ef6e44 13469 bnx2x_nic_load(bp, LOAD_NORMAL);
13470
13471 netif_device_attach(dev);
13472
13473 rtnl_unlock();
13474}
13475
13476static struct pci_error_handlers bnx2x_err_handler = {
13477 .error_detected = bnx2x_io_error_detected,
13478 .slot_reset = bnx2x_io_slot_reset,
13479 .resume = bnx2x_io_resume,
13480};
13481
a2fbb9ea 13482static struct pci_driver bnx2x_pci_driver = {
13483 .name = DRV_MODULE_NAME,
13484 .id_table = bnx2x_pci_tbl,
13485 .probe = bnx2x_init_one,
13486 .remove = __devexit_p(bnx2x_remove_one),
13487 .suspend = bnx2x_suspend,
13488 .resume = bnx2x_resume,
13489 .err_handler = &bnx2x_err_handler,
13490};
13491
13492static int __init bnx2x_init(void)
13493{
13494 int ret;
13495
7995c64e 13496 pr_info("%s", version);
938cf541 13497
13498 bnx2x_wq = create_singlethread_workqueue("bnx2x");
13499 if (bnx2x_wq == NULL) {
7995c64e 13500 pr_err("Cannot create workqueue\n");
13501 return -ENOMEM;
13502 }
13503
13504 ret = pci_register_driver(&bnx2x_pci_driver);
13505 if (ret) {
7995c64e 13506 pr_err("Cannot register driver\n");
13507 destroy_workqueue(bnx2x_wq);
13508 }
13509 return ret;
13510}
13511
13512static void __exit bnx2x_cleanup(void)
13513{
13514 pci_unregister_driver(&bnx2x_pci_driver);
13515
13516 destroy_workqueue(bnx2x_wq);
13517}
13518
13519module_init(bnx2x_init);
13520module_exit(bnx2x_cleanup);
13521
13522#ifdef BCM_CNIC
13523
13524/* count denotes the number of new completions we have seen */
13525static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
13526{
13527 struct eth_spe *spe;
13528
13529#ifdef BNX2X_STOP_ON_ERROR
13530 if (unlikely(bp->panic))
13531 return;
13532#endif
13533
13534 spin_lock_bh(&bp->spq_lock);
13535 bp->cnic_spq_pending -= count;
13536
13537 for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
13538 bp->cnic_spq_pending++) {
13539
13540 if (!bp->cnic_kwq_pending)
13541 break;
13542
13543 spe = bnx2x_sp_get_next(bp);
13544 *spe = *bp->cnic_kwq_cons;
13545
13546 bp->cnic_kwq_pending--;
13547
13548 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
13549 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
13550
13551 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
13552 bp->cnic_kwq_cons = bp->cnic_kwq;
13553 else
13554 bp->cnic_kwq_cons++;
13555 }
13556 bnx2x_sp_prod_update(bp);
13557 spin_unlock_bh(&bp->spq_lock);
13558}
13559
13560static int bnx2x_cnic_sp_queue(struct net_device *dev,
13561 struct kwqe_16 *kwqes[], u32 count)
13562{
13563 struct bnx2x *bp = netdev_priv(dev);
13564 int i;
13565
13566#ifdef BNX2X_STOP_ON_ERROR
13567 if (unlikely(bp->panic))
13568 return -EIO;
13569#endif
13570
13571 spin_lock_bh(&bp->spq_lock);
13572
13573 for (i = 0; i < count; i++) {
13574 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
13575
13576 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
13577 break;
13578
13579 *bp->cnic_kwq_prod = *spe;
13580
13581 bp->cnic_kwq_pending++;
13582
13583 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
13584 spe->hdr.conn_and_cmd_data, spe->hdr.type,
13585 spe->data.mac_config_addr.hi,
13586 spe->data.mac_config_addr.lo,
13587 bp->cnic_kwq_pending);
13588
13589 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
13590 bp->cnic_kwq_prod = bp->cnic_kwq;
13591 else
13592 bp->cnic_kwq_prod++;
13593 }
13594
13595 spin_unlock_bh(&bp->spq_lock);
13596
13597 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
13598 bnx2x_cnic_sp_post(bp, 0);
13599
13600 return i;
13601}
13602
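/* Deliver a control command to the registered CNIC driver. Takes
 * cnic_mutex, so this variant may only be called from process context.
 */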
13603static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
13604{
13605 struct cnic_ops *c_ops;
13606 int rc = 0;
13607
13608 mutex_lock(&bp->cnic_mutex);
13609 c_ops = bp->cnic_ops;
13610 if (c_ops)
13611 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
13612 mutex_unlock(&bp->cnic_mutex);
13613
13614 return rc;
13615}
13616
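/* Same as bnx2x_cnic_ctl_send() but protected by RCU instead of the
 * mutex, making it safe to call from bottom-half context.
 */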
13617static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
13618{
13619 struct cnic_ops *c_ops;
13620 int rc = 0;
13621
13622 rcu_read_lock();
13623 c_ops = rcu_dereference(bp->cnic_ops);
13624 if (c_ops)
13625 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
13626 rcu_read_unlock();
13627
13628 return rc;
13629}
13630
13631/*
13632 * for commands that have no data
13633 */
13634static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
13635{
13636 struct cnic_ctl_info ctl = {0};
13637
13638 ctl.cmd = cmd;
13639
13640 return bnx2x_cnic_ctl_send(bp, &ctl);
13641}
13642
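/* Report a CFC completion for connection 'cid' to CNIC, then account
 * for it as one consumed slow-path queue slot.
 */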
13643static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
13644{
13645 struct cnic_ctl_info ctl;
13646
13647 /* first we tell CNIC and only then we count this as a completion */
13648 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
13649 ctl.data.comp.cid = cid;
13650
13651 bnx2x_cnic_ctl_send_bh(bp, &ctl);
13652 bnx2x_cnic_sp_post(bp, 1);
13653}
13654
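/* Callback through which the CNIC module drives the bnx2x device:
 * context-table (ILT) writes, completion accounting, and enabling or
 * disabling the rx mode for an L2 client.
 */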
13655static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
13656{
13657 struct bnx2x *bp = netdev_priv(dev);
13658 int rc = 0;
13659
13660 switch (ctl->cmd) {
13661 case DRV_CTL_CTXTBL_WR_CMD: {
13662 u32 index = ctl->data.io.offset;
13663 dma_addr_t addr = ctl->data.io.dma_addr;
13664
13665 bnx2x_ilt_wr(bp, index, addr);
13666 break;
13667 }
13668
13669 case DRV_CTL_COMPLETION_CMD: {
13670 int count = ctl->data.comp.comp_count;
13671
13672 bnx2x_cnic_sp_post(bp, count);
13673 break;
13674 }
13675
13676 /* rtnl_lock is held. */
13677 case DRV_CTL_START_L2_CMD: {
13678 u32 cli = ctl->data.ring.client_id;
13679
13680 bp->rx_mode_cl_mask |= (1 << cli);
13681 bnx2x_set_storm_rx_mode(bp);
13682 break;
13683 }
13684
13685 /* rtnl_lock is held. */
13686 case DRV_CTL_STOP_L2_CMD: {
13687 u32 cli = ctl->data.ring.client_id;
13688
13689 bp->rx_mode_cl_mask &= ~(1 << cli);
13690 bnx2x_set_storm_rx_mode(bp);
13691 break;
13692 }
13693
13694 default:
13695 BNX2X_ERR("unknown command %x\n", ctl->cmd);
13696 rc = -EINVAL;
13697 }
13698
13699 return rc;
13700}
13701
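/* Describe our interrupt resources to CNIC: with MSI-X it is handed the
 * vector from msix_table[1], plus the CNIC and default status blocks.
 */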
13702static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
13703{
13704 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13705
13706 if (bp->flags & USING_MSIX_FLAG) {
13707 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
13708 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
13709 cp->irq_arr[0].vector = bp->msix_table[1].vector;
13710 } else {
13711 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
13712 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
13713 }
13714 cp->irq_arr[0].status_blk = bp->cnic_sb;
13715 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
13716 cp->irq_arr[1].status_blk = bp->def_status_blk;
13717 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
13718
13719 cp->num_irq = 2;
13720}
13721
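/* Called by the CNIC module to attach to this device: allocates the
 * kwqe staging ring, initializes the CNIC status block and IRQ info,
 * sets the iSCSI MAC and finally publishes 'ops' under RCU.
 */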
13722static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
13723 void *data)
13724{
13725 struct bnx2x *bp = netdev_priv(dev);
13726 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13727
13728 if (ops == NULL)
13729 return -EINVAL;
13730
13731 if (atomic_read(&bp->intr_sem) != 0)
13732 return -EBUSY;
13733
13734 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
13735 if (!bp->cnic_kwq)
13736 return -ENOMEM;
13737
13738 bp->cnic_kwq_cons = bp->cnic_kwq;
13739 bp->cnic_kwq_prod = bp->cnic_kwq;
13740 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
13741
13742 bp->cnic_spq_pending = 0;
13743 bp->cnic_kwq_pending = 0;
13744
13745 bp->cnic_data = data;
13746
13747 cp->num_irq = 0;
13748 cp->drv_state = CNIC_DRV_STATE_REGD;
13749
13750 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
13751
13752 bnx2x_setup_cnic_irq_info(bp);
13753 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
13754 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
13755 rcu_assign_pointer(bp->cnic_ops, ops);
13756
13757 return 0;
13758}
13759
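/* Detach the CNIC module: clear the iSCSI MAC, unpublish the ops
 * pointer, wait out RCU readers and free the kwqe staging ring.
 */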
13760static int bnx2x_unregister_cnic(struct net_device *dev)
13761{
13762 struct bnx2x *bp = netdev_priv(dev);
13763 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13764
13765 mutex_lock(&bp->cnic_mutex);
13766 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
13767 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
13768 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
13769 }
13770 cp->drv_state = 0;
13771 rcu_assign_pointer(bp->cnic_ops, NULL);
13772 mutex_unlock(&bp->cnic_mutex);
13773 synchronize_rcu();
13774 kfree(bp->cnic_kwq);
13775 bp->cnic_kwq = NULL;
13776
13777 return 0;
13778}
13779
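/* Exported probe hook: fills in and returns the cnic_eth_dev structure
 * that the CNIC module uses to bind to this device.
 */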
13780struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
13781{
13782 struct bnx2x *bp = netdev_priv(dev);
13783 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13784
13785 cp->drv_owner = THIS_MODULE;
13786 cp->chip_id = CHIP_ID(bp);
13787 cp->pdev = bp->pdev;
13788 cp->io_base = bp->regview;
13789 cp->io_base2 = bp->doorbells;
13790 cp->max_kwqe_pending = 8;
13791 cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
13792 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
13793 cp->ctx_tbl_len = CNIC_ILT_LINES;
13794 cp->starting_cid = BCM_CNIC_CID_START;
13795 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
13796 cp->drv_ctl = bnx2x_drv_ctl;
13797 cp->drv_register_cnic = bnx2x_register_cnic;
13798 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
13799
13800 return cp;
13801}
13802EXPORT_SYMBOL(bnx2x_cnic_probe);
13803
13804#endif /* BCM_CNIC */
13805