/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
	#include <linux/if_vlan.h>
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.45.23"
#define DRV_MODULE_RELDATE	"2008/11/03"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif
static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

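/*
 * The pair of helpers above implement "indirect" register access: the
 * chip maps a 4-byte GRC window into PCI config space, so writing the
 * target address to PCICFG_GRC_ADDRESS selects a register and
 * PCICFG_GRC_DATA then reads or writes it.  The window is parked back
 * at PCICFG_VENDOR_ID_OFFSET afterwards, presumably so a stray config
 * cycle cannot hit a live register.
 */
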
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

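/*
 * A DMAE command is posted by copying the dmae_command structure
 * word-by-word into the command memory slot for channel 'idx' and then
 * writing 1 to that channel's GO register (dmae_reg_go_c[] above),
 * which starts the transfer.
 */
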
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

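/*
 * Completion scheme used above: the command's comp_addr points at the
 * slowpath wb_comp word, which the DMAE engine writes with
 * DMAE_COMP_VAL when the copy finishes.  The caller polls that word up
 * to 200 times (sleeping longer on emulation/FPGA parts), all under
 * dmae_mutex since the single init_dmae command slot is shared.  If
 * the engine is not ready yet, the data is pushed through the slower
 * indirect-access path instead.
 */
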
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

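/*
 * Assert-list layout assumed by bnx2x_mc_assert() above: each STORM
 * processor exposes a one-byte list index plus an array of 16-byte
 * entries (four u32 rows) in its internal memory.  An entry whose
 * first row still holds COMMON_ASM_INVALID_ASSERT_OPCODE terminates
 * the scan; anything else is a firmware assert, and the return value
 * counts how many were found across all four STORMs.
 */
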
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
			  " *sb_u_idx(%x) bd data(%x,%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
			  fp->status_blk->c_status_block.status_block_index,
			  fp->fp_u_idx,
			  fp->status_blk->u_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j < end; j++) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
		   val, port, addr, msix);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
	   val, port, addr, msix);

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig attention */
				val |= 0x0100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i].vector);

		/* one more for the Slow Path IRQ */
		synchronize_irq(bp->msix_table[i].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

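/*
 * The quiesce order above matters: intr_sem is bumped first so any ISR
 * that still fires becomes a no-op, the HC is then optionally masked so
 * no new interrupts are generated, synchronize_irq() drains handlers
 * already in flight on every vector (each fastpath MSI-X vector plus
 * the slowpath one, or the single INTx line), and finally the delayed
 * slowpath work is cancelled and the workqueue flushed so sp_task
 * cannot be running when this returns.
 */
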
/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

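/*
 * bnx2x_update_fpsb_idx() returns a change bitmask: bit 0 is set when
 * the CSTORM status block index advanced and bit 1 when the USTORM one
 * did.  A non-zero result tells the caller that the chip has posted
 * new events since the last look.
 */
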
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

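/*
 * BD accounting in bnx2x_free_tx_pkt() above: a transmitted packet
 * occupies 'nbd' descriptors starting at tx_buf->first_bd - the first
 * (mapped) BD, then an optional parse BD and an optional TSO split
 * header BD (neither of which carries a DMA mapping, hence the skips),
 * then one mapped BD per fragment.  Only the mapped BDs are unmapped,
 * and the new consumer index is simply first_bd + nbd.
 */
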
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

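/*
 * Worked example for the arithmetic above: with prod = 50 just after
 * the u16 counter wrapped and cons = 65500, SUB_S16() evaluates
 * 50 - 65500 in 16-bit arithmetic and correctly yields 86 BDs in
 * flight.  Adding NUM_TX_RINGS counts the "next page" BDs (one per
 * ring page) as permanently used, so the returned value errs toward
 * understating the free space rather than overstating it.
 */
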
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_cons update visible to start_xmit()
	 * before checking for netif_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_queue_stopped(bp->dev))) {

		netif_tx_lock(bp->dev);

		if (netif_queue_stopped(bp->dev) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_wake_queue(bp->dev);

		netif_tx_unlock(bp->dev);
	}
}


static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   FP_IDX(fp), cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (FP_IDX(fp)) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

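/*
 * Dispatch rule used above: the switch keys on (command | state), so a
 * ramrod completion is only accepted when it arrives in the state that
 * expects it; anything else is reported as an unexpected MC reply.
 * The mb() after each state change pairs with the polling loop in
 * bnx2x_wait_ramrod(), per the inline comments.
 */
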
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

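/*
 * Reuse rather than reallocation: only the small header area
 * (rx_offset + RX_COPY_THRESH) that the CPU may have touched is synced
 * back to the device; the buffer's DMA mapping itself is carried over
 * from the consumer slot to the producer slot untouched, which is why
 * the header comment notes that no dma_mapping_error() check is
 * needed.
 */
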
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

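/*
 * sge_mask bookkeeping above: every SGE consumed by the CQE's SGL gets
 * its mask bit cleared, and the producer is then advanced over each
 * leading 64-bit mask element that went fully to zero, restoring the
 * element to all-ones as it goes.  The "next page" slots are
 * re-cleared afterwards so they are never mistaken for completed
 * entries.
 */
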
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that the "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bp->eth_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

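/*
 * Note on the gso_size assignment above: no MSS is taken from the CQE
 * here; min(SGE_PAGE_SIZE, max(frag_size, len_on_bd)) serves as a
 * stand-in that lets the stack re-segment the aggregated skb - the
 * "forwarding support" referred to by the inline comment.
 */
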
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) &&
			    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			     PARSING_FLAGS_VLAN))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		bp->eth_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct tstorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n",
	   bd_prod, rx_comp_prod, rx_sge_prod);
}

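/*
 * Producer publication above: the tstorm_eth_rx_producers struct (bd,
 * cqe and sge producers) is copied a dword at a time into TSTORM
 * internal memory at the per-port, per-client offset.  The wmb()
 * before the copy guarantees the firmware can never observe a producer
 * that points at a BD/SGE whose buffer contents are not yet visible.
 */
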
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   cqe->fast_path_cqe.rss_hash_result,
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return -EINVAL;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				bp->eth_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					bp->eth_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				bp->eth_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					bp->eth_stats.hw_csum_err++;
			}
		}

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

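/*
 * TPA dispatch rule used in the loop above: a CQE carrying both
 * TPA_TYPE_START and TPA_TYPE_END at once is by definition a regular
 * (non-TPA) completion and falls through to the normal receive path;
 * a pure START opens an aggregation bin and a pure END closes one and
 * pushes the assembled skb up the stack.
 */
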
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = FP_IDX(fp);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, FP_SB_ID(fp));
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	netif_rx_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		netif_rx_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}


	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */

static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

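/*
 * HW lock register convention, as inferred from the accesses above:
 * each PCI function owns a DRIVER_CONTROL register pair; writing the
 * resource bit to the register at offset +4 attempts to take the lock,
 * and reading the base register back shows which resources are
 * currently held, so "bit now visible" means the acquire won.  Release
 * (below) writes the bit to the base register itself.
 */
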
4a37fb66 1744static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1745{
1746 u32 lock_status;
1747 u32 resource_bit = (1 << resource);
4a37fb66
YG
1748 int func = BP_FUNC(bp);
1749 u32 hw_lock_control_reg;
a2fbb9ea 1750
c18487ee
YR
1751 /* Validating that the resource is within range */
1752 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1753 DP(NETIF_MSG_HW,
1754 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1755 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1756 return -EINVAL;
1757 }
1758
4a37fb66
YG
1759 if (func <= 5) {
1760 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1761 } else {
1762 hw_lock_control_reg =
1763 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1764 }
1765
c18487ee 1766 /* Validating that the resource is currently taken */
4a37fb66 1767 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1768 if (!(lock_status & resource_bit)) {
1769 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1770 lock_status, resource_bit);
1771 return -EFAULT;
a2fbb9ea
ET
1772 }
1773
4a37fb66 1774 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1775 return 0;
1776}
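/* Editor's sketch (not part of the driver): the bracket-style use of the
 * two helpers above.  bnx2x_example_locked_gpio_rd() is a hypothetical
 * name; REG_RD, MISC_REG_GPIO and HW_LOCK_RESOURCE_GPIO are the real
 * accessor/register/resource used elsewhere in this file.
 */
static u32 bnx2x_example_locked_gpio_rd(struct bnx2x *bp)
{
	u32 val = 0;

	if (bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO) == 0) {
		val = REG_RD(bp, MISC_REG_GPIO);
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	}
	return val;
}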
1777
1778/* HW Lock for shared dual port PHYs */
4a37fb66 1779static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee
YR
1780{
1781 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 1782
34f80b04 1783 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1784
c18487ee
YR
1785 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1786 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
4a37fb66 1787 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
c18487ee 1788}
a2fbb9ea 1789
4a37fb66 1790static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee
YR
1791{
1792 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 1793
c18487ee
YR
1794 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1795 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
4a37fb66 1796 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
a2fbb9ea 1797
34f80b04 1798 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1799}
a2fbb9ea 1800
17de50b7 1801int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
1802{
1803 /* The GPIO should be swapped if swap register is set and active */
1804 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1805 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
1806 int gpio_shift = gpio_num +
1807 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1808 u32 gpio_mask = (1 << gpio_shift);
1809 u32 gpio_reg;
a2fbb9ea 1810
c18487ee
YR
1811 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1812 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1813 return -EINVAL;
1814 }
a2fbb9ea 1815
4a37fb66 1816 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
1817 /* read GPIO and mask except the float bits */
1818 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1819
c18487ee
YR
1820 switch (mode) {
1821 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1822 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1823 gpio_num, gpio_shift);
1824 /* clear FLOAT and set CLR */
1825 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1826 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1827 break;
a2fbb9ea 1828
c18487ee
YR
1829 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1830 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1831 gpio_num, gpio_shift);
1832 /* clear FLOAT and set SET */
1833 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1834 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1835 break;
a2fbb9ea 1836
17de50b7 1837 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
1838 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1839 gpio_num, gpio_shift);
1840 /* set FLOAT */
1841 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1842 break;
a2fbb9ea 1843
c18487ee
YR
1844 default:
1845 break;
a2fbb9ea
ET
1846 }
1847
c18487ee 1848 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 1849 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 1850
c18487ee 1851 return 0;
a2fbb9ea
ET
1852}
1853
c18487ee 1854static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 1855{
c18487ee
YR
1856 u32 spio_mask = (1 << spio_num);
1857 u32 spio_reg;
a2fbb9ea 1858
c18487ee
YR
1859 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1860 (spio_num > MISC_REGISTERS_SPIO_7)) {
1861 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1862 return -EINVAL;
a2fbb9ea
ET
1863 }
1864
4a37fb66 1865 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
1866 /* read SPIO and mask except the float bits */
1867 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 1868
c18487ee 1869 switch (mode) {
6378c025 1870 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
1871 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1872 /* clear FLOAT and set CLR */
1873 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1874 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1875 break;
a2fbb9ea 1876
6378c025 1877 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
1878 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1879 /* clear FLOAT and set SET */
1880 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1881 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1882 break;
a2fbb9ea 1883
c18487ee
YR
1884 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1885 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1886 /* set FLOAT */
1887 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1888 break;
a2fbb9ea 1889
c18487ee
YR
1890 default:
1891 break;
a2fbb9ea
ET
1892 }
1893
c18487ee 1894 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 1895 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 1896
a2fbb9ea
ET
1897 return 0;
1898}
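/* Editor's sketch (not part of the driver): the GPIO and SPIO helpers
 * above share one convention - clear FLOAT and set CLR/SET to drive the
 * pin, or set FLOAT to release it.  bnx2x_example_spio_pulse() is a
 * hypothetical name, and MISC_REGISTERS_SPIO_5 is assumed to lie in the
 * 4..7 window checked by bnx2x_set_spio().
 */
static void bnx2x_example_spio_pulse(struct bnx2x *bp)
{
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_OUTPUT_LOW);	/* drive low */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);	/* float again */
}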
1899
c18487ee 1900static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 1901{
c18487ee
YR
1902 switch (bp->link_vars.ieee_fc) {
1903 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 1904 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
1905 ADVERTISED_Pause);
1906 break;
1907 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 1908 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
1909 ADVERTISED_Pause);
1910 break;
1911 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 1912 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee
YR
1913 break;
1914 default:
34f80b04 1915 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
1916 ADVERTISED_Pause);
1917 break;
1918 }
1919}
f1410647 1920
c18487ee
YR
1921static void bnx2x_link_report(struct bnx2x *bp)
1922{
1923 if (bp->link_vars.link_up) {
1924 if (bp->state == BNX2X_STATE_OPEN)
1925 netif_carrier_on(bp->dev);
1926 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 1927
c18487ee 1928 printk("%d Mbps ", bp->link_vars.line_speed);
f1410647 1929
c18487ee
YR
1930 if (bp->link_vars.duplex == DUPLEX_FULL)
1931 printk("full duplex");
1932 else
1933 printk("half duplex");
f1410647 1934
c0700f90
DM
1935 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
1936 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
c18487ee 1937 printk(", receive ");
c0700f90 1938 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
c18487ee
YR
1939 printk("& transmit ");
1940 } else {
1941 printk(", transmit ");
1942 }
1943 printk("flow control ON");
1944 }
1945 printk("\n");
f1410647 1946
c18487ee
YR
1947 } else { /* link_down */
1948 netif_carrier_off(bp->dev);
1949 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 1950 }
c18487ee
YR
1951}
1952
1953static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1954{
19680c48
EG
1955 if (!BP_NOMCP(bp)) {
1956 u8 rc;
a2fbb9ea 1957
19680c48 1958 /* Initialize link parameters structure variables */
8c99e7b0
YR
1959 /* It is recommended to turn off RX FC for jumbo frames
1960 for better performance */
1961 if (IS_E1HMF(bp))
c0700f90 1962 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
8c99e7b0 1963 else if (bp->dev->mtu > 5000)
c0700f90 1964 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 1965 else
c0700f90 1966 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 1967
4a37fb66 1968 bnx2x_acquire_phy_lock(bp);
19680c48 1969 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 1970 bnx2x_release_phy_lock(bp);
a2fbb9ea 1971
19680c48
EG
1972 if (bp->link_vars.link_up)
1973 bnx2x_link_report(bp);
a2fbb9ea 1974
19680c48 1975 bnx2x_calc_fc_adv(bp);
34f80b04 1976
19680c48
EG
1977 return rc;
1978 }
 1979 BNX2X_ERR("Bootcode is missing - not initializing link\n");
1980 return -EINVAL;
a2fbb9ea
ET
1981}
1982
c18487ee 1983static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 1984{
19680c48 1985 if (!BP_NOMCP(bp)) {
4a37fb66 1986 bnx2x_acquire_phy_lock(bp);
19680c48 1987 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 1988 bnx2x_release_phy_lock(bp);
a2fbb9ea 1989
19680c48
EG
1990 bnx2x_calc_fc_adv(bp);
1991 } else
 1992 BNX2X_ERR("Bootcode is missing - not setting link\n");
c18487ee 1993}
a2fbb9ea 1994
c18487ee
YR
1995static void bnx2x__link_reset(struct bnx2x *bp)
1996{
19680c48 1997 if (!BP_NOMCP(bp)) {
4a37fb66 1998 bnx2x_acquire_phy_lock(bp);
19680c48 1999 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
4a37fb66 2000 bnx2x_release_phy_lock(bp);
19680c48
EG
2001 } else
 2002 BNX2X_ERR("Bootcode is missing - not resetting link\n");
c18487ee 2003}
a2fbb9ea 2004
c18487ee
YR
2005static u8 bnx2x_link_test(struct bnx2x *bp)
2006{
2007 u8 rc;
a2fbb9ea 2008
4a37fb66 2009 bnx2x_acquire_phy_lock(bp);
c18487ee 2010 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 2011 bnx2x_release_phy_lock(bp);
a2fbb9ea 2012
c18487ee
YR
2013 return rc;
2014}
a2fbb9ea 2015
34f80b04
EG
2016/* Calculates the sum of vn_min_rates.
2017 It's needed for further normalizing of the min_rates.
2018
2019 Returns:
2020 sum of vn_min_rates
2021 or
2022 0 - if all the min_rates are 0.
33471629 2023 In the latter case the fairness algorithm should be deactivated.
34f80b04
EG
2024 If not all min_rates are zero then those that are zeroes will
2025 be set to 1.
2026 */
2027static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2028{
2029 int i, port = BP_PORT(bp);
2030 u32 wsum = 0;
2031 int all_zero = 1;
2032
2033 for (i = 0; i < E1HVN_MAX; i++) {
2034 u32 vn_cfg =
2035 SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2036 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2037 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2038 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2039 /* If min rate is zero - set it to 1 */
2040 if (!vn_min_rate)
2041 vn_min_rate = DEF_MIN_RATE;
2042 else
2043 all_zero = 0;
2044
2045 wsum += vn_min_rate;
2046 }
2047 }
2048
2049 /* ... only if all min rates are zeros - disable FAIRNESS */
2050 if (all_zero)
2051 return 0;
2052
2053 return wsum;
2054}
2055
2056static void bnx2x_init_port_minmax(struct bnx2x *bp,
2057 int en_fness,
2058 u16 port_rate,
2059 struct cmng_struct_per_port *m_cmng_port)
2060{
2061 u32 r_param = port_rate / 8;
2062 int port = BP_PORT(bp);
2063 int i;
2064
2065 memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2066
2067 /* Enable minmax only if we are in e1hmf mode */
2068 if (IS_E1HMF(bp)) {
2069 u32 fair_periodic_timeout_usec;
2070 u32 t_fair;
2071
2072 /* Enable rate shaping and fairness */
2073 m_cmng_port->flags.cmng_vn_enable = 1;
2074 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2075 m_cmng_port->flags.rate_shaping_enable = 1;
2076
2077 if (!en_fness)
 2078 DP(NETIF_MSG_IFUP, "All MIN values are zeroes;"
2079 " fairness will be disabled\n");
2080
2081 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2082 m_cmng_port->rs_vars.rs_periodic_timeout =
2083 RS_PERIODIC_TIMEOUT_USEC / 4;
2084
 2085 /* this is the threshold below which no timer arming will occur;
 2086 the 1.25 coefficient makes the threshold a little bigger
 2087 than the real time, to compensate for timer inaccuracy */
2088 m_cmng_port->rs_vars.rs_threshold =
2089 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2090
2091 /* resolution of fairness timer */
2092 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2093 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2094 t_fair = T_FAIR_COEF / port_rate;
2095
2096 /* this is the threshold below which we won't arm
2097 the timer anymore */
2098 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2099
 2100 /* we multiply by 1e3/8 to get bytes/msec.
 2101 We don't want the accumulated credit to exceed
 2102 T_FAIR*FAIR_MEM (the algorithm resolution) */
2103 m_cmng_port->fair_vars.upper_bound =
2104 r_param * t_fair * FAIR_MEM;
2105 /* since each tick is 4 usec */
2106 m_cmng_port->fair_vars.fairness_timeout =
2107 fair_periodic_timeout_usec / 4;
2108
2109 } else {
2110 /* Disable rate shaping and fairness */
2111 m_cmng_port->flags.cmng_vn_enable = 0;
2112 m_cmng_port->flags.fairness_enable = 0;
2113 m_cmng_port->flags.rate_shaping_enable = 0;
2114
2115 DP(NETIF_MSG_IFUP,
2116 "Single function mode minmax will be disabled\n");
2117 }
2118
2119 /* Store it to internal memory */
2120 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2121 REG_WR(bp, BAR_XSTRORM_INTMEM +
2122 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2123 ((u32 *)(m_cmng_port))[i]);
2124}
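/* Editor's worked example (values inferred from the code and comments
 * above: RS_PERIODIC_TIMEOUT_USEC = 100 and T_FAIR_COEF = 10^7, as the
 * "for 10G it is 1000usec" comment implies): at port_rate = 10000 Mbps,
 * r_param = 10000/8 = 1250, rs_periodic_timeout = 100/4 = 25 ticks,
 * rs_threshold = (100 * 1250 * 5)/4 = 156250 and t_fair = 10^7/10000 =
 * 1000 usec.
 */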
2125
2126static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2127 u32 wsum, u16 port_rate,
2128 struct cmng_struct_per_port *m_cmng_port)
2129{
2130 struct rate_shaping_vars_per_vn m_rs_vn;
2131 struct fairness_vars_per_vn m_fair_vn;
2132 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2133 u16 vn_min_rate, vn_max_rate;
2134 int i;
2135
2136 /* If function is hidden - set min and max to zeroes */
2137 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2138 vn_min_rate = 0;
2139 vn_max_rate = 0;
2140
2141 } else {
2142 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2143 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2144 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2145 if current min rate is zero - set it to 1.
33471629 2146 This is a requirement of the algorithm. */
34f80b04
EG
2147 if ((vn_min_rate == 0) && wsum)
2148 vn_min_rate = DEF_MIN_RATE;
2149 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2150 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2151 }
2152
2153 DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
2154 "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2155
2156 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2157 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2158
2159 /* global vn counter - maximal Mbps for this vn */
2160 m_rs_vn.vn_counter.rate = vn_max_rate;
2161
2162 /* quota - number of bytes transmitted in this period */
2163 m_rs_vn.vn_counter.quota =
2164 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2165
2166#ifdef BNX2X_PER_PROT_QOS
2167 /* per protocol counter */
2168 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2169 /* maximal Mbps for this protocol */
2170 m_rs_vn.protocol_counters[protocol].rate =
2171 protocol_max_rate[protocol];
2172 /* the quota in each timer period -
2173 number of bytes transmitted in this period */
2174 m_rs_vn.protocol_counters[protocol].quota =
2175 (u32)(rs_periodic_timeout_usec *
2176 ((double)m_rs_vn.
2177 protocol_counters[protocol].rate/8));
2178 }
2179#endif
2180
2181 if (wsum) {
 2182 /* credit for each period of the fairness algorithm:
 2183 number of bytes in T_FAIR (the VN's share of the port rate).
 2184 wsum should not be larger than 10000, thus
 2185 T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2186 m_fair_vn.vn_credit_delta =
2187 max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2188 (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2189 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2190 m_fair_vn.vn_credit_delta);
2191 }
2192
2193#ifdef BNX2X_PER_PROT_QOS
2194 do {
2195 u32 protocolWeightSum = 0;
2196
2197 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2198 protocolWeightSum +=
2199 drvInit.protocol_min_rate[protocol];
2200 /* per protocol counter -
2201 NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2202 if (protocolWeightSum > 0) {
2203 for (protocol = 0;
2204 protocol < NUM_OF_PROTOCOLS; protocol++)
2205 /* credit for each period of the
2206 fairness algorithm - number of bytes in
 2207 T_FAIR (the protocol's share of the VN rate) */
2208 m_fair_vn.protocol_credit_delta[protocol] =
2209 (u32)((vn_min_rate / 8) * t_fair *
2210 protocol_min_rate / protocolWeightSum);
2211 }
2212 } while (0);
2213#endif
2214
2215 /* Store it to internal memory */
2216 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2217 REG_WR(bp, BAR_XSTRORM_INTMEM +
2218 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2219 ((u32 *)(&m_rs_vn))[i]);
2220
2221 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2222 REG_WR(bp, BAR_XSTRORM_INTMEM +
2223 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2224 ((u32 *)(&m_fair_vn))[i]);
2225}
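/* Editor's worked example (same T_FAIR_COEF = 10^7 assumption as above):
 * with two VNs at min rate 100 each, wsum = 200, T_FAIR_COEF/(8*wsum) =
 * 10^7/1600 = 6250, so vn_credit_delta = max(100 * 6250, 2 *
 * fair_threshold) = max(625000, 2 * QM_ARB_BYTES).
 */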
2226
c18487ee
YR
2227/* This function is called upon link interrupt */
2228static void bnx2x_link_attn(struct bnx2x *bp)
2229{
34f80b04
EG
2230 int vn;
2231
bb2a0f7a
YG
2232 /* Make sure that we are synced with the current statistics */
2233 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2234
4a37fb66 2235 bnx2x_acquire_phy_lock(bp);
c18487ee 2236 bnx2x_link_update(&bp->link_params, &bp->link_vars);
4a37fb66 2237 bnx2x_release_phy_lock(bp);
a2fbb9ea 2238
bb2a0f7a
YG
2239 if (bp->link_vars.link_up) {
2240
2241 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2242 struct host_port_stats *pstats;
2243
2244 pstats = bnx2x_sp(bp, port_stats);
2245 /* reset old bmac stats */
2246 memset(&(pstats->mac_stx[0]), 0,
2247 sizeof(struct mac_stx));
2248 }
2249 if ((bp->state == BNX2X_STATE_OPEN) ||
2250 (bp->state == BNX2X_STATE_DISABLED))
2251 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2252 }
2253
c18487ee
YR
2254 /* indicate link status */
2255 bnx2x_link_report(bp);
34f80b04
EG
2256
2257 if (IS_E1HMF(bp)) {
2258 int func;
2259
2260 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2261 if (vn == BP_E1HVN(bp))
2262 continue;
2263
2264 func = ((vn << 1) | BP_PORT(bp));
2265
2266 /* Set the attention towards other drivers
2267 on the same port */
2268 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2269 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2270 }
2271 }
2272
2273 if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2274 struct cmng_struct_per_port m_cmng_port;
2275 u32 wsum;
2276 int port = BP_PORT(bp);
2277
2278 /* Init RATE SHAPING and FAIRNESS contexts */
2279 wsum = bnx2x_calc_vn_wsum(bp);
2280 bnx2x_init_port_minmax(bp, (int)wsum,
2281 bp->link_vars.line_speed,
2282 &m_cmng_port);
2283 if (IS_E1HMF(bp))
2284 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2285 bnx2x_init_vn_minmax(bp, 2*vn + port,
2286 wsum, bp->link_vars.line_speed,
2287 &m_cmng_port);
2288 }
c18487ee 2289}
a2fbb9ea 2290
c18487ee
YR
2291static void bnx2x__link_status_update(struct bnx2x *bp)
2292{
2293 if (bp->state != BNX2X_STATE_OPEN)
2294 return;
a2fbb9ea 2295
c18487ee 2296 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2297
bb2a0f7a
YG
2298 if (bp->link_vars.link_up)
2299 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2300 else
2301 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2302
c18487ee
YR
2303 /* indicate link status */
2304 bnx2x_link_report(bp);
a2fbb9ea 2305}
a2fbb9ea 2306
34f80b04
EG
2307static void bnx2x_pmf_update(struct bnx2x *bp)
2308{
2309 int port = BP_PORT(bp);
2310 u32 val;
2311
2312 bp->port.pmf = 1;
2313 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2314
2315 /* enable nig attention */
2316 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2317 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2318 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2319
2320 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2321}
2322
c18487ee 2323/* end of Link */
a2fbb9ea
ET
2324
2325/* slow path */
2326
2327/*
2328 * General service functions
2329 */
2330
2331/* the slow path queue is odd since completions arrive on the fastpath ring */
2332static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2333 u32 data_hi, u32 data_lo, int common)
2334{
34f80b04 2335 int func = BP_FUNC(bp);
a2fbb9ea 2336
34f80b04
EG
2337 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2338 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
2339 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2340 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2341 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2342
2343#ifdef BNX2X_STOP_ON_ERROR
2344 if (unlikely(bp->panic))
2345 return -EIO;
2346#endif
2347
34f80b04 2348 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2349
2350 if (!bp->spq_left) {
2351 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2352 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2353 bnx2x_panic();
2354 return -EBUSY;
2355 }
f1410647 2356
a2fbb9ea
ET
 2357 /* CID needs port number to be encoded in it */
2358 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2359 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2360 HW_CID(bp, cid)));
2361 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2362 if (common)
2363 bp->spq_prod_bd->hdr.type |=
2364 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2365
2366 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2367 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2368
2369 bp->spq_left--;
2370
2371 if (bp->spq_prod_bd == bp->spq_last_bd) {
2372 bp->spq_prod_bd = bp->spq;
2373 bp->spq_prod_idx = 0;
2374 DP(NETIF_MSG_TIMER, "end of spq\n");
2375
2376 } else {
2377 bp->spq_prod_bd++;
2378 bp->spq_prod_idx++;
2379 }
2380
34f80b04 2381 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
2382 bp->spq_prod_idx);
2383
34f80b04 2384 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2385 return 0;
2386}
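/* Editor's sketch (not part of the driver): a minimal slow path post,
 * mirroring the statistics query issued by bnx2x_storm_stats_post()
 * further down.  bnx2x_example_post_stats_query() is a hypothetical
 * wrapper name.
 */
static int bnx2x_example_post_stats_query(struct bnx2x *bp, u32 hi, u32 lo)
{
	/* cid 0, data hi/lo as given, not a common ramrod */
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0, hi, lo, 0);
}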
2387
2388/* acquire split MCP access lock register */
4a37fb66 2389static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2390{
a2fbb9ea 2391 u32 i, j, val;
34f80b04 2392 int rc = 0;
a2fbb9ea
ET
2393
2394 might_sleep();
2395 i = 100;
2396 for (j = 0; j < i*10; j++) {
2397 val = (1UL << 31);
2398 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2399 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2400 if (val & (1L << 31))
2401 break;
2402
2403 msleep(5);
2404 }
a2fbb9ea 2405 if (!(val & (1L << 31))) {
19680c48 2406 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2407 rc = -EBUSY;
2408 }
2409
2410 return rc;
2411}
2412
4a37fb66
YG
2413/* release split MCP access lock register */
2414static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea
ET
2415{
2416 u32 val = 0;
2417
2418 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2419}
2420
2421static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2422{
2423 struct host_def_status_block *def_sb = bp->def_status_blk;
2424 u16 rc = 0;
2425
2426 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2427 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2428 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2429 rc |= 1;
2430 }
2431 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2432 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2433 rc |= 2;
2434 }
2435 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2436 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2437 rc |= 4;
2438 }
2439 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2440 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2441 rc |= 8;
2442 }
2443 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2444 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2445 rc |= 16;
2446 }
2447 return rc;
2448}
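/* Editor's note: the bitmap returned above is what bnx2x_sp_task() below
 * consumes - bit 0 attention, bit 1 CStorm, bit 2 UStorm, bit 3 XStorm,
 * bit 4 TStorm.  E.g. a return value of 0x3 means both the attention
 * index and the CStorm index advanced since the last acknowledgment.
 */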
2449
2450/*
2451 * slow path service functions
2452 */
2453
2454static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2455{
34f80b04 2456 int port = BP_PORT(bp);
5c862848
EG
2457 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2458 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
2459 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2460 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2461 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2462 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2463 u32 aeu_mask;
a2fbb9ea 2464
a2fbb9ea
ET
2465 if (bp->attn_state & asserted)
2466 BNX2X_ERR("IGU ERROR\n");
2467
3fcaf2e5
EG
2468 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2469 aeu_mask = REG_RD(bp, aeu_addr);
2470
a2fbb9ea 2471 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5
EG
2472 aeu_mask, asserted);
2473 aeu_mask &= ~(asserted & 0xff);
2474 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2475
3fcaf2e5
EG
2476 REG_WR(bp, aeu_addr, aeu_mask);
2477 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2478
3fcaf2e5 2479 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2480 bp->attn_state |= asserted;
3fcaf2e5 2481 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2482
2483 if (asserted & ATTN_HARD_WIRED_MASK) {
2484 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2485
877e9aa4
ET
2486 /* save nig interrupt mask */
2487 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2488 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2489
c18487ee 2490 bnx2x_link_attn(bp);
a2fbb9ea
ET
2491
2492 /* handle unicore attn? */
2493 }
2494 if (asserted & ATTN_SW_TIMER_4_FUNC)
2495 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2496
2497 if (asserted & GPIO_2_FUNC)
2498 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2499
2500 if (asserted & GPIO_3_FUNC)
2501 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2502
2503 if (asserted & GPIO_4_FUNC)
2504 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2505
2506 if (port == 0) {
2507 if (asserted & ATTN_GENERAL_ATTN_1) {
2508 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2509 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2510 }
2511 if (asserted & ATTN_GENERAL_ATTN_2) {
2512 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2513 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2514 }
2515 if (asserted & ATTN_GENERAL_ATTN_3) {
2516 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2517 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2518 }
2519 } else {
2520 if (asserted & ATTN_GENERAL_ATTN_4) {
2521 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2522 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2523 }
2524 if (asserted & ATTN_GENERAL_ATTN_5) {
2525 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2526 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2527 }
2528 if (asserted & ATTN_GENERAL_ATTN_6) {
2529 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2530 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2531 }
2532 }
2533
2534 } /* if hardwired */
2535
5c862848
EG
2536 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2537 asserted, hc_addr);
2538 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
2539
2540 /* now set back the mask */
2541 if (asserted & ATTN_NIG_FOR_FUNC)
877e9aa4 2542 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
a2fbb9ea
ET
2543}
2544
877e9aa4 2545static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2546{
34f80b04 2547 int port = BP_PORT(bp);
877e9aa4
ET
2548 int reg_offset;
2549 u32 val;
2550
34f80b04
EG
2551 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2552 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2553
34f80b04 2554 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2555
2556 val = REG_RD(bp, reg_offset);
2557 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2558 REG_WR(bp, reg_offset, val);
2559
2560 BNX2X_ERR("SPIO5 hw attention\n");
2561
34f80b04 2562 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
7add905f 2563 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
877e9aa4
ET
2564 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2565 /* Fan failure attention */
2566
17de50b7 2567 /* The PHY reset is controlled by GPIO 1 */
877e9aa4 2568 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
17de50b7
EG
2569 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2570 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2571 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2572 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4 2573 /* mark the failure */
c18487ee 2574 bp->link_params.ext_phy_config &=
877e9aa4 2575 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
c18487ee 2576 bp->link_params.ext_phy_config |=
877e9aa4
ET
2577 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2578 SHMEM_WR(bp,
2579 dev_info.port_hw_config[port].
2580 external_phy_config,
c18487ee 2581 bp->link_params.ext_phy_config);
877e9aa4
ET
2582 /* log the failure */
2583 printk(KERN_ERR PFX "Fan Failure on Network"
2584 " Controller %s has caused the driver to"
2585 " shutdown the card to prevent permanent"
2586 " damage. Please contact Dell Support for"
2587 " assistance\n", bp->dev->name);
2588 break;
2589
2590 default:
2591 break;
2592 }
2593 }
34f80b04
EG
2594
2595 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2596
2597 val = REG_RD(bp, reg_offset);
2598 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2599 REG_WR(bp, reg_offset, val);
2600
2601 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2602 (attn & HW_INTERRUT_ASSERT_SET_0));
2603 bnx2x_panic();
2604 }
877e9aa4
ET
2605}
2606
2607static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2608{
2609 u32 val;
2610
2611 if (attn & BNX2X_DOORQ_ASSERT) {
2612
2613 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2614 BNX2X_ERR("DB hw attention 0x%x\n", val);
2615 /* DORQ discard attention */
2616 if (val & 0x2)
2617 BNX2X_ERR("FATAL error from DORQ\n");
2618 }
34f80b04
EG
2619
2620 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2621
2622 int port = BP_PORT(bp);
2623 int reg_offset;
2624
2625 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2626 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2627
2628 val = REG_RD(bp, reg_offset);
2629 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2630 REG_WR(bp, reg_offset, val);
2631
2632 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2633 (attn & HW_INTERRUT_ASSERT_SET_1));
2634 bnx2x_panic();
2635 }
877e9aa4
ET
2636}
2637
2638static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2639{
2640 u32 val;
2641
2642 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2643
2644 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2645 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2646 /* CFC error attention */
2647 if (val & 0x2)
2648 BNX2X_ERR("FATAL error from CFC\n");
2649 }
2650
2651 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2652
2653 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2654 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2655 /* RQ_USDMDP_FIFO_OVERFLOW */
2656 if (val & 0x18000)
2657 BNX2X_ERR("FATAL error from PXP\n");
2658 }
34f80b04
EG
2659
2660 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2661
2662 int port = BP_PORT(bp);
2663 int reg_offset;
2664
2665 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2666 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2667
2668 val = REG_RD(bp, reg_offset);
2669 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2670 REG_WR(bp, reg_offset, val);
2671
2672 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2673 (attn & HW_INTERRUT_ASSERT_SET_2));
2674 bnx2x_panic();
2675 }
877e9aa4
ET
2676}
2677
2678static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2679{
34f80b04
EG
2680 u32 val;
2681
877e9aa4
ET
2682 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2683
34f80b04
EG
2684 if (attn & BNX2X_PMF_LINK_ASSERT) {
2685 int func = BP_FUNC(bp);
2686
2687 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2688 bnx2x__link_status_update(bp);
2689 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2690 DRV_STATUS_PMF)
2691 bnx2x_pmf_update(bp);
2692
2693 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
2694
2695 BNX2X_ERR("MC assert!\n");
2696 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2697 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2698 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2699 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2700 bnx2x_panic();
2701
2702 } else if (attn & BNX2X_MCP_ASSERT) {
2703
2704 BNX2X_ERR("MCP assert!\n");
2705 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 2706 bnx2x_fw_dump(bp);
877e9aa4
ET
2707
2708 } else
2709 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2710 }
2711
2712 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
2713 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2714 if (attn & BNX2X_GRC_TIMEOUT) {
2715 val = CHIP_IS_E1H(bp) ?
2716 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2717 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2718 }
2719 if (attn & BNX2X_GRC_RSV) {
2720 val = CHIP_IS_E1H(bp) ?
2721 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2722 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2723 }
877e9aa4 2724 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
2725 }
2726}
2727
2728static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2729{
a2fbb9ea
ET
2730 struct attn_route attn;
2731 struct attn_route group_mask;
34f80b04 2732 int port = BP_PORT(bp);
877e9aa4 2733 int index;
a2fbb9ea
ET
2734 u32 reg_addr;
2735 u32 val;
3fcaf2e5 2736 u32 aeu_mask;
a2fbb9ea
ET
2737
2738 /* need to take HW lock because MCP or other port might also
2739 try to handle this event */
4a37fb66 2740 bnx2x_acquire_alr(bp);
a2fbb9ea
ET
2741
2742 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2743 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2744 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2745 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
2746 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2747 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
2748
2749 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2750 if (deasserted & (1 << index)) {
2751 group_mask = bp->attn_group[index];
2752
34f80b04
EG
2753 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2754 index, group_mask.sig[0], group_mask.sig[1],
2755 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 2756
877e9aa4
ET
2757 bnx2x_attn_int_deasserted3(bp,
2758 attn.sig[3] & group_mask.sig[3]);
2759 bnx2x_attn_int_deasserted1(bp,
2760 attn.sig[1] & group_mask.sig[1]);
2761 bnx2x_attn_int_deasserted2(bp,
2762 attn.sig[2] & group_mask.sig[2]);
2763 bnx2x_attn_int_deasserted0(bp,
2764 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 2765
a2fbb9ea
ET
2766 if ((attn.sig[0] & group_mask.sig[0] &
2767 HW_PRTY_ASSERT_SET_0) ||
2768 (attn.sig[1] & group_mask.sig[1] &
2769 HW_PRTY_ASSERT_SET_1) ||
2770 (attn.sig[2] & group_mask.sig[2] &
2771 HW_PRTY_ASSERT_SET_2))
6378c025 2772 BNX2X_ERR("FATAL HW block parity attention\n");
a2fbb9ea
ET
2773 }
2774 }
2775
4a37fb66 2776 bnx2x_release_alr(bp);
a2fbb9ea 2777
5c862848 2778 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
a2fbb9ea
ET
2779
2780 val = ~deasserted;
3fcaf2e5
EG
2781 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2782 val, reg_addr);
5c862848 2783 REG_WR(bp, reg_addr, val);
a2fbb9ea 2784
a2fbb9ea 2785 if (~bp->attn_state & deasserted)
3fcaf2e5 2786 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
2787
2788 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2789 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2790
3fcaf2e5
EG
2791 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2792 aeu_mask = REG_RD(bp, reg_addr);
2793
2794 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2795 aeu_mask, deasserted);
2796 aeu_mask |= (deasserted & 0xff);
2797 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2798
3fcaf2e5
EG
2799 REG_WR(bp, reg_addr, aeu_mask);
2800 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
2801
2802 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2803 bp->attn_state &= ~deasserted;
2804 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2805}
2806
2807static void bnx2x_attn_int(struct bnx2x *bp)
2808{
2809 /* read local copy of bits */
2810 u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
2811 u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
2812 u32 attn_state = bp->attn_state;
2813
2814 /* look for changed bits */
2815 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2816 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2817
2818 DP(NETIF_MSG_HW,
2819 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2820 attn_bits, attn_ack, asserted, deasserted);
2821
2822 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 2823 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
2824
2825 /* handle bits that were raised */
2826 if (asserted)
2827 bnx2x_attn_int_asserted(bp, asserted);
2828
2829 if (deasserted)
2830 bnx2x_attn_int_deasserted(bp, deasserted);
2831}
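/* Editor's worked example (arbitrary values) for bnx2x_attn_int() above:
 * with attn_bits = 0x5, attn_ack = 0x4 and attn_state = 0x4,
 * asserted = 0x5 & ~0x4 & ~0x4 = 0x1 (bit 0 newly raised) while
 * deasserted = ~0x5 & 0x4 & 0x4 = 0x0 (nothing cleared).
 */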
2832
2833static void bnx2x_sp_task(struct work_struct *work)
2834{
1cf167f2 2835 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
2836 u16 status;
2837
34f80b04 2838
a2fbb9ea
ET
2839 /* Return here if interrupt is disabled */
2840 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2841 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2842 return;
2843 }
2844
2845 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
2846/* if (status == 0) */
2847/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 2848
3196a88a 2849 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 2850
877e9aa4
ET
2851 /* HW attentions */
2852 if (status & 0x1)
a2fbb9ea 2853 bnx2x_attn_int(bp);
a2fbb9ea 2854
bb2a0f7a
YG
2855 /* CStorm events: query_stats, port delete ramrod */
2856 if (status & 0x2)
2857 bp->stats_pending = 0;
2858
a2fbb9ea
ET
2859 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
2860 IGU_INT_NOP, 1);
2861 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2862 IGU_INT_NOP, 1);
2863 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2864 IGU_INT_NOP, 1);
2865 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2866 IGU_INT_NOP, 1);
2867 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2868 IGU_INT_ENABLE, 1);
877e9aa4 2869
a2fbb9ea
ET
2870}
2871
2872static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2873{
2874 struct net_device *dev = dev_instance;
2875 struct bnx2x *bp = netdev_priv(dev);
2876
2877 /* Return here if interrupt is disabled */
2878 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2879 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2880 return IRQ_HANDLED;
2881 }
2882
877e9aa4 2883 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
2884
2885#ifdef BNX2X_STOP_ON_ERROR
2886 if (unlikely(bp->panic))
2887 return IRQ_HANDLED;
2888#endif
2889
1cf167f2 2890 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
2891
2892 return IRQ_HANDLED;
2893}
2894
2895/* end of slow path */
2896
2897/* Statistics */
2898
2899/****************************************************************************
2900* Macros
2901****************************************************************************/
2902
a2fbb9ea
ET
2903/* sum[hi:lo] += add[hi:lo] */
2904#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2905 do { \
2906 s_lo += a_lo; \
 2907 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2908 } while (0)
2909
2910/* difference = minuend - subtrahend */
2911#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2912 do { \
bb2a0f7a
YG
2913 if (m_lo < s_lo) { \
2914 /* underflow */ \
a2fbb9ea 2915 d_hi = m_hi - s_hi; \
bb2a0f7a 2916 if (d_hi > 0) { \
6378c025 2917 /* we can 'borrow' 1 */ \
a2fbb9ea
ET
2918 d_hi--; \
2919 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 2920 } else { \
6378c025 2921 /* m_hi <= s_hi */ \
a2fbb9ea
ET
2922 d_hi = 0; \
2923 d_lo = 0; \
2924 } \
bb2a0f7a
YG
2925 } else { \
2926 /* m_lo >= s_lo */ \
a2fbb9ea 2927 if (m_hi < s_hi) { \
bb2a0f7a
YG
2928 d_hi = 0; \
2929 d_lo = 0; \
2930 } else { \
6378c025 2931 /* m_hi >= s_hi */ \
bb2a0f7a
YG
2932 d_hi = m_hi - s_hi; \
2933 d_lo = m_lo - s_lo; \
a2fbb9ea
ET
2934 } \
2935 } \
2936 } while (0)
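/* Editor's sketch (not part of the driver): one carry and one borrow
 * case for the split-64 helpers above; bnx2x_example_hilo_math() is a
 * hypothetical name.  Note ADD_64 relies on the parenthesized carry
 * term as fixed above.
 */
static void bnx2x_example_hilo_math(void)
{
	u32 s_hi = 0, s_lo = 0xffffffff;
	u32 d_hi, d_lo;

	ADD_64(s_hi, 0, s_lo, 1);	 /* 0:ffffffff + 0:1 -> 1:0 */
	DIFF_64(d_hi, 1, 0, d_lo, 0, 1); /* 1:0 - 0:1 -> 0:ffffffff */
}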
2937
bb2a0f7a 2938#define UPDATE_STAT64(s, t) \
a2fbb9ea 2939 do { \
bb2a0f7a
YG
2940 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2941 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2942 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2943 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2944 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2945 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea
ET
2946 } while (0)
2947
bb2a0f7a 2948#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 2949 do { \
bb2a0f7a
YG
2950 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2951 diff.lo, new->s##_lo, old->s##_lo); \
2952 ADD_64(estats->t##_hi, diff.hi, \
2953 estats->t##_lo, diff.lo); \
a2fbb9ea
ET
2954 } while (0)
2955
2956/* sum[hi:lo] += add */
2957#define ADD_EXTEND_64(s_hi, s_lo, a) \
2958 do { \
2959 s_lo += a; \
2960 s_hi += (s_lo < a) ? 1 : 0; \
2961 } while (0)
2962
bb2a0f7a 2963#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 2964 do { \
bb2a0f7a
YG
2965 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2966 pstats->mac_stx[1].s##_lo, \
2967 new->s); \
a2fbb9ea
ET
2968 } while (0)
2969
bb2a0f7a 2970#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea
ET
2971 do { \
2972 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2973 old_tclient->s = le32_to_cpu(tclient->s); \
bb2a0f7a
YG
2974 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2975 } while (0)
2976
2977#define UPDATE_EXTEND_XSTAT(s, t) \
2978 do { \
2979 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2980 old_xclient->s = le32_to_cpu(xclient->s); \
2981 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
a2fbb9ea
ET
2982 } while (0)
2983
2984/*
2985 * General service functions
2986 */
2987
2988static inline long bnx2x_hilo(u32 *hiref)
2989{
2990 u32 lo = *(hiref + 1);
2991#if (BITS_PER_LONG == 64)
2992 u32 hi = *hiref;
2993
2994 return HILO_U64(hi, lo);
2995#else
2996 return lo;
2997#endif
2998}
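/* Editor's sketch: callers hand bnx2x_hilo() the _hi member of a split
 * counter pair, e.g. (hypothetical field name):
 * nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
 */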
2999
3000/*
3001 * Init service functions
3002 */
3003
bb2a0f7a
YG
3004static void bnx2x_storm_stats_post(struct bnx2x *bp)
3005{
3006 if (!bp->stats_pending) {
3007 struct eth_query_ramrod_data ramrod_data = {0};
3008 int rc;
3009
3010 ramrod_data.drv_counter = bp->stats_counter++;
3011 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3012 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3013
3014 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3015 ((u32 *)&ramrod_data)[1],
3016 ((u32 *)&ramrod_data)[0], 0);
3017 if (rc == 0) {
 3018 /* stats ramrod has its own slot on the spq */
3019 bp->spq_left++;
3020 bp->stats_pending = 1;
3021 }
3022 }
3023}
3024
3025static void bnx2x_stats_init(struct bnx2x *bp)
3026{
3027 int port = BP_PORT(bp);
3028
3029 bp->executer_idx = 0;
3030 bp->stats_counter = 0;
3031
3032 /* port stats */
3033 if (!BP_NOMCP(bp))
3034 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3035 else
3036 bp->port.port_stx = 0;
3037 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3038
3039 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3040 bp->port.old_nig_stats.brb_discard =
3041 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
66e855f3
YG
3042 bp->port.old_nig_stats.brb_truncate =
3043 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
bb2a0f7a
YG
3044 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3045 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3046 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3047 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3048
3049 /* function stats */
3050 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3051 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3052 memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3053 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3054
3055 bp->stats_state = STATS_STATE_DISABLED;
3056 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3057 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3058}
3059
3060static void bnx2x_hw_stats_post(struct bnx2x *bp)
3061{
3062 struct dmae_command *dmae = &bp->stats_dmae;
3063 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3064
3065 *stats_comp = DMAE_COMP_VAL;
3066
3067 /* loader */
3068 if (bp->executer_idx) {
3069 int loader_idx = PMF_DMAE_C(bp);
3070
3071 memset(dmae, 0, sizeof(struct dmae_command));
3072
3073 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3074 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3075 DMAE_CMD_DST_RESET |
3076#ifdef __BIG_ENDIAN
3077 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3078#else
3079 DMAE_CMD_ENDIANITY_DW_SWAP |
3080#endif
3081 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3082 DMAE_CMD_PORT_0) |
3083 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3084 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3085 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3086 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3087 sizeof(struct dmae_command) *
3088 (loader_idx + 1)) >> 2;
3089 dmae->dst_addr_hi = 0;
3090 dmae->len = sizeof(struct dmae_command) >> 2;
3091 if (CHIP_IS_E1(bp))
3092 dmae->len--;
3093 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3094 dmae->comp_addr_hi = 0;
3095 dmae->comp_val = 1;
3096
3097 *stats_comp = 0;
3098 bnx2x_post_dmae(bp, dmae, loader_idx);
3099
3100 } else if (bp->func_stx) {
3101 *stats_comp = 0;
3102 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3103 }
3104}
3105
3106static int bnx2x_stats_comp(struct bnx2x *bp)
3107{
3108 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3109 int cnt = 10;
3110
3111 might_sleep();
3112 while (*stats_comp != DMAE_COMP_VAL) {
bb2a0f7a
YG
3113 if (!cnt) {
 3114 BNX2X_ERR("timeout waiting for stats to finish\n");
3115 break;
3116 }
3117 cnt--;
12469401 3118 msleep(1);
bb2a0f7a
YG
3119 }
3120 return 1;
3121}
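/* Editor's note: the completion word polled in bnx2x_stats_comp() is
 * written back to DMAE_COMP_VAL by the last DMAE command of the chain
 * (its comp_addr points at stats_comp and comp_val = DMAE_COMP_VAL), so
 * the wait above is bounded at roughly 10 x 1ms.
 */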
3122
3123/*
3124 * Statistics service functions
3125 */
3126
3127static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3128{
3129 struct dmae_command *dmae;
3130 u32 opcode;
3131 int loader_idx = PMF_DMAE_C(bp);
3132 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3133
3134 /* sanity */
3135 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3136 BNX2X_ERR("BUG!\n");
3137 return;
3138 }
3139
3140 bp->executer_idx = 0;
3141
3142 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3143 DMAE_CMD_C_ENABLE |
3144 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3145#ifdef __BIG_ENDIAN
3146 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3147#else
3148 DMAE_CMD_ENDIANITY_DW_SWAP |
3149#endif
3150 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3151 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3152
3153 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3154 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3155 dmae->src_addr_lo = bp->port.port_stx >> 2;
3156 dmae->src_addr_hi = 0;
3157 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3158 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3159 dmae->len = DMAE_LEN32_RD_MAX;
3160 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3161 dmae->comp_addr_hi = 0;
3162 dmae->comp_val = 1;
3163
3164 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3165 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3166 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3167 dmae->src_addr_hi = 0;
7a9b2557
VZ
3168 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3169 DMAE_LEN32_RD_MAX * 4);
3170 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3171 DMAE_LEN32_RD_MAX * 4);
bb2a0f7a
YG
3172 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3173 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3174 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3175 dmae->comp_val = DMAE_COMP_VAL;
3176
3177 *stats_comp = 0;
3178 bnx2x_hw_stats_post(bp);
3179 bnx2x_stats_comp(bp);
3180}
3181
3182static void bnx2x_port_stats_init(struct bnx2x *bp)
a2fbb9ea
ET
3183{
3184 struct dmae_command *dmae;
34f80b04 3185 int port = BP_PORT(bp);
bb2a0f7a 3186 int vn = BP_E1HVN(bp);
a2fbb9ea 3187 u32 opcode;
bb2a0f7a 3188 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3189 u32 mac_addr;
bb2a0f7a
YG
3190 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3191
3192 /* sanity */
3193 if (!bp->link_vars.link_up || !bp->port.pmf) {
3194 BNX2X_ERR("BUG!\n");
3195 return;
3196 }
a2fbb9ea
ET
3197
3198 bp->executer_idx = 0;
bb2a0f7a
YG
3199
3200 /* MCP */
3201 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3202 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3203 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3204#ifdef __BIG_ENDIAN
bb2a0f7a 3205 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3206#else
bb2a0f7a 3207 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3208#endif
bb2a0f7a
YG
3209 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3210 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3211
bb2a0f7a 3212 if (bp->port.port_stx) {
a2fbb9ea
ET
3213
3214 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3215 dmae->opcode = opcode;
bb2a0f7a
YG
3216 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3217 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3218 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3219 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3220 dmae->len = sizeof(struct host_port_stats) >> 2;
3221 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3222 dmae->comp_addr_hi = 0;
3223 dmae->comp_val = 1;
a2fbb9ea
ET
3224 }
3225
bb2a0f7a
YG
3226 if (bp->func_stx) {
3227
3228 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3229 dmae->opcode = opcode;
3230 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3231 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3232 dmae->dst_addr_lo = bp->func_stx >> 2;
3233 dmae->dst_addr_hi = 0;
3234 dmae->len = sizeof(struct host_func_stats) >> 2;
3235 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3236 dmae->comp_addr_hi = 0;
3237 dmae->comp_val = 1;
a2fbb9ea
ET
3238 }
3239
bb2a0f7a 3240 /* MAC */
a2fbb9ea
ET
3241 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3242 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3243 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3244#ifdef __BIG_ENDIAN
3245 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3246#else
3247 DMAE_CMD_ENDIANITY_DW_SWAP |
3248#endif
bb2a0f7a
YG
3249 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3250 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3251
c18487ee 3252 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
a2fbb9ea
ET
3253
3254 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3255 NIG_REG_INGRESS_BMAC0_MEM);
3256
3257 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3258 BIGMAC_REGISTER_TX_STAT_GTBYT */
3259 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3260 dmae->opcode = opcode;
3261 dmae->src_addr_lo = (mac_addr +
3262 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3263 dmae->src_addr_hi = 0;
3264 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3265 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3266 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3267 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3268 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3269 dmae->comp_addr_hi = 0;
3270 dmae->comp_val = 1;
3271
3272 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3273 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3274 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3275 dmae->opcode = opcode;
3276 dmae->src_addr_lo = (mac_addr +
3277 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3278 dmae->src_addr_hi = 0;
3279 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3280 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 3281 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3282 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea
ET
3283 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3284 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3285 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3286 dmae->comp_addr_hi = 0;
3287 dmae->comp_val = 1;
3288
c18487ee 3289 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
a2fbb9ea
ET
3290
3291 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3292
3293 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3294 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3295 dmae->opcode = opcode;
3296 dmae->src_addr_lo = (mac_addr +
3297 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3298 dmae->src_addr_hi = 0;
3299 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3300 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3301 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3302 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3303 dmae->comp_addr_hi = 0;
3304 dmae->comp_val = 1;
3305
3306 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3307 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3308 dmae->opcode = opcode;
3309 dmae->src_addr_lo = (mac_addr +
3310 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3311 dmae->src_addr_hi = 0;
3312 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3313 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 3314 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3315 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea
ET
3316 dmae->len = 1;
3317 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3318 dmae->comp_addr_hi = 0;
3319 dmae->comp_val = 1;
3320
3321 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3322 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3323 dmae->opcode = opcode;
3324 dmae->src_addr_lo = (mac_addr +
3325 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3326 dmae->src_addr_hi = 0;
3327 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3328 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 3329 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3330 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea
ET
3331 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3332 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3333 dmae->comp_addr_hi = 0;
3334 dmae->comp_val = 1;
3335 }
3336
3337 /* NIG */
bb2a0f7a
YG
3338 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3339 dmae->opcode = opcode;
3340 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3341 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3342 dmae->src_addr_hi = 0;
3343 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3344 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3345 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3346 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3347 dmae->comp_addr_hi = 0;
3348 dmae->comp_val = 1;
3349
3350 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3351 dmae->opcode = opcode;
3352 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3353 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3354 dmae->src_addr_hi = 0;
3355 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3356 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3357 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3358 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3359 dmae->len = (2*sizeof(u32)) >> 2;
3360 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3361 dmae->comp_addr_hi = 0;
3362 dmae->comp_val = 1;
3363
a2fbb9ea
ET
3364 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3365 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3366 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3367 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3368#ifdef __BIG_ENDIAN
3369 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3370#else
3371 DMAE_CMD_ENDIANITY_DW_SWAP |
3372#endif
bb2a0f7a
YG
3373 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3374 (vn << DMAE_CMD_E1HVN_SHIFT));
3375 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3376 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
a2fbb9ea 3377 dmae->src_addr_hi = 0;
bb2a0f7a
YG
3378 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3379 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3380 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3381 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3382 dmae->len = (2*sizeof(u32)) >> 2;
3383 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3384 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3385 dmae->comp_val = DMAE_COMP_VAL;
3386
3387 *stats_comp = 0;
a2fbb9ea
ET
3388}
3389
bb2a0f7a 3390static void bnx2x_func_stats_init(struct bnx2x *bp)
a2fbb9ea 3391{
bb2a0f7a
YG
3392 struct dmae_command *dmae = &bp->stats_dmae;
3393 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3394
bb2a0f7a
YG
3395 /* sanity */
3396 if (!bp->func_stx) {
3397 BNX2X_ERR("BUG!\n");
3398 return;
3399 }
a2fbb9ea 3400
bb2a0f7a
YG
3401 bp->executer_idx = 0;
3402 memset(dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 3403
bb2a0f7a
YG
3404 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3405 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3406 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3407#ifdef __BIG_ENDIAN
3408 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3409#else
3410 DMAE_CMD_ENDIANITY_DW_SWAP |
3411#endif
3412 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3413 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3414 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3415 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3416 dmae->dst_addr_lo = bp->func_stx >> 2;
3417 dmae->dst_addr_hi = 0;
3418 dmae->len = sizeof(struct host_func_stats) >> 2;
3419 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3420 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3421 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3422
bb2a0f7a
YG
3423 *stats_comp = 0;
3424}
a2fbb9ea 3425
bb2a0f7a
YG
3426static void bnx2x_stats_start(struct bnx2x *bp)
3427{
3428 if (bp->port.pmf)
3429 bnx2x_port_stats_init(bp);
3430
3431 else if (bp->func_stx)
3432 bnx2x_func_stats_init(bp);
3433
3434 bnx2x_hw_stats_post(bp);
3435 bnx2x_storm_stats_post(bp);
3436}
3437
3438static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3439{
3440 bnx2x_stats_comp(bp);
3441 bnx2x_stats_pmf_update(bp);
3442 bnx2x_stats_start(bp);
3443}
3444
3445static void bnx2x_stats_restart(struct bnx2x *bp)
3446{
3447 bnx2x_stats_comp(bp);
3448 bnx2x_stats_start(bp);
3449}
3450
3451static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3452{
3453 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3454 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3455 struct regpair diff;
3456
3457 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3458 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3459 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3460 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3461 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3462 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 3463 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
bb2a0f7a
YG
3464 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3465 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3466 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3467 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3468 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3469 UPDATE_STAT64(tx_stat_gt127,
3470 tx_stat_etherstatspkts65octetsto127octets);
3471 UPDATE_STAT64(tx_stat_gt255,
3472 tx_stat_etherstatspkts128octetsto255octets);
3473 UPDATE_STAT64(tx_stat_gt511,
3474 tx_stat_etherstatspkts256octetsto511octets);
3475 UPDATE_STAT64(tx_stat_gt1023,
3476 tx_stat_etherstatspkts512octetsto1023octets);
3477 UPDATE_STAT64(tx_stat_gt1518,
3478 tx_stat_etherstatspkts1024octetsto1522octets);
3479 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3480 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3481 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3482 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3483 UPDATE_STAT64(tx_stat_gterr,
3484 tx_stat_dot3statsinternalmactransmiterrors);
3485 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3486}
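/*
 * Editor's sketch (hypothetical helper, not the driver's macro): the
 * UPDATE_STAT64 calls above presumably extend each hardware counter
 * delta into a {hi, lo} pair of u32s with a manual carry, roughly:
 */
#if 0 /* illustrative only */
static inline void add_extend_64(u32 *hi, u32 *lo, u32 delta)
{
	u32 old_lo = *lo;

	*lo += delta;
	if (*lo < old_lo)	/* 32-bit wrap => carry into the high word */
		(*hi)++;
}
#endif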
3487
3488static void bnx2x_emac_stats_update(struct bnx2x *bp)
3489{
3490 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3491 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3492
3493 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3494 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3495 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3496 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3497 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3498 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3499 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3500 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3501 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3502 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3503 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3504 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3505 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3506 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3507 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3508 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3509 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3510 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3511 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3512 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3513 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3514 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3515 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3516 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3517 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3518 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3519 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3520 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3521 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3522 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3523 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3524}
3525
3526static int bnx2x_hw_stats_update(struct bnx2x *bp)
3527{
3528 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3529 struct nig_stats *old = &(bp->port.old_nig_stats);
3530 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3531 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3532 struct regpair diff;
3533
3534 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3535 bnx2x_bmac_stats_update(bp);
3536
3537 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3538 bnx2x_emac_stats_update(bp);
3539
3540 else { /* unreached */
3541 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3542 return -1;
3543 }
a2fbb9ea 3544
bb2a0f7a
YG
3545 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3546 new->brb_discard - old->brb_discard);
66e855f3
YG
3547 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3548 new->brb_truncate - old->brb_truncate);
a2fbb9ea 3549
bb2a0f7a
YG
3550 UPDATE_STAT64_NIG(egress_mac_pkt0,
3551 etherstatspkts1024octetsto1522octets);
3552 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 3553
bb2a0f7a 3554 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 3555
bb2a0f7a
YG
3556 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3557 sizeof(struct mac_stx));
3558 estats->brb_drop_hi = pstats->brb_drop_hi;
3559 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 3560
bb2a0f7a 3561 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 3562
bb2a0f7a 3563 return 0;
a2fbb9ea
ET
3564}
3565
bb2a0f7a 3566static int bnx2x_storm_stats_update(struct bnx2x *bp)
a2fbb9ea
ET
3567{
3568 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
bb2a0f7a
YG
3569 int cl_id = BP_CL_ID(bp);
3570 struct tstorm_per_port_stats *tport =
3571 &stats->tstorm_common.port_statistics;
a2fbb9ea 3572 struct tstorm_per_client_stats *tclient =
bb2a0f7a 3573 &stats->tstorm_common.client_statistics[cl_id];
a2fbb9ea 3574 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
bb2a0f7a
YG
3575 struct xstorm_per_client_stats *xclient =
3576 &stats->xstorm_common.client_statistics[cl_id];
3577 struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3578 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3579 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea
ET
3580 u32 diff;
3581
bb2a0f7a
YG
3582 /* are storm stats valid? */
3583 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3584 bp->stats_counter) {
 3585 DP(BNX2X_MSG_STATS, "stats not updated by tstorm:"
 3586 " tstorm counter (%d) != stats_counter (%d)\n",
3587 tclient->stats_counter, bp->stats_counter);
a2fbb9ea
ET
3588 return -1;
3589 }
bb2a0f7a
YG
3590 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3591 bp->stats_counter) {
3592 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3593 " xstorm counter (%d) != stats_counter (%d)\n",
3594 xclient->stats_counter, bp->stats_counter);
a2fbb9ea
ET
3595 return -2;
3596 }
a2fbb9ea 3597
bb2a0f7a
YG
3598 fstats->total_bytes_received_hi =
3599 fstats->valid_bytes_received_hi =
a2fbb9ea 3600 le32_to_cpu(tclient->total_rcv_bytes.hi);
bb2a0f7a
YG
3601 fstats->total_bytes_received_lo =
3602 fstats->valid_bytes_received_lo =
a2fbb9ea 3603 le32_to_cpu(tclient->total_rcv_bytes.lo);
bb2a0f7a
YG
3604
3605 estats->error_bytes_received_hi =
3606 le32_to_cpu(tclient->rcv_error_bytes.hi);
3607 estats->error_bytes_received_lo =
3608 le32_to_cpu(tclient->rcv_error_bytes.lo);
3609 ADD_64(estats->error_bytes_received_hi,
3610 estats->rx_stat_ifhcinbadoctets_hi,
3611 estats->error_bytes_received_lo,
3612 estats->rx_stat_ifhcinbadoctets_lo);
3613
3614 ADD_64(fstats->total_bytes_received_hi,
3615 estats->error_bytes_received_hi,
3616 fstats->total_bytes_received_lo,
3617 estats->error_bytes_received_lo);
3618
3619 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
a2fbb9ea 3620 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
bb2a0f7a 3621 total_multicast_packets_received);
a2fbb9ea 3622 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
bb2a0f7a
YG
3623 total_broadcast_packets_received);
3624
3625 fstats->total_bytes_transmitted_hi =
3626 le32_to_cpu(xclient->total_sent_bytes.hi);
3627 fstats->total_bytes_transmitted_lo =
3628 le32_to_cpu(xclient->total_sent_bytes.lo);
3629
3630 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3631 total_unicast_packets_transmitted);
3632 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3633 total_multicast_packets_transmitted);
3634 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3635 total_broadcast_packets_transmitted);
3636
3637 memcpy(estats, &(fstats->total_bytes_received_hi),
3638 sizeof(struct host_func_stats) - 2*sizeof(u32));
3639
3640 estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3641 estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3642 estats->brb_truncate_discard =
3643 le32_to_cpu(tport->brb_truncate_discard);
3644 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3645
3646 old_tclient->rcv_unicast_bytes.hi =
a2fbb9ea 3647 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
bb2a0f7a 3648 old_tclient->rcv_unicast_bytes.lo =
a2fbb9ea 3649 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
bb2a0f7a 3650 old_tclient->rcv_broadcast_bytes.hi =
a2fbb9ea 3651 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
bb2a0f7a 3652 old_tclient->rcv_broadcast_bytes.lo =
a2fbb9ea 3653 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
bb2a0f7a 3654 old_tclient->rcv_multicast_bytes.hi =
a2fbb9ea 3655 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
bb2a0f7a 3656 old_tclient->rcv_multicast_bytes.lo =
a2fbb9ea 3657 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
bb2a0f7a 3658 old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
a2fbb9ea 3659
bb2a0f7a
YG
3660 old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3661 old_tclient->packets_too_big_discard =
a2fbb9ea 3662 le32_to_cpu(tclient->packets_too_big_discard);
bb2a0f7a
YG
3663 estats->no_buff_discard =
3664 old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3665 old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3666
3667 old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3668 old_xclient->unicast_bytes_sent.hi =
3669 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3670 old_xclient->unicast_bytes_sent.lo =
3671 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3672 old_xclient->multicast_bytes_sent.hi =
3673 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3674 old_xclient->multicast_bytes_sent.lo =
3675 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3676 old_xclient->broadcast_bytes_sent.hi =
3677 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3678 old_xclient->broadcast_bytes_sent.lo =
3679 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3680
3681 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
a2fbb9ea
ET
3682
3683 return 0;
3684}
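/*
 * Editor's sketch (hypothetical name): the two guards at the top of
 * bnx2x_storm_stats_update() are a sequence check -- a storm snapshot
 * is fresh only if the counter it echoed back is exactly one behind
 * the counter the driver last posted; doing the math in u16 handles
 * wraparound for free:
 */
#if 0 /* illustrative only */
static bool storm_snapshot_fresh(u16 echoed, u16 posted)
{
	return (u16)(echoed + 1) == posted;
}
#endif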
3685
bb2a0f7a 3686static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 3687{
bb2a0f7a
YG
3688 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3689 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea
ET
3690 struct net_device_stats *nstats = &bp->dev->stats;
3691
3692 nstats->rx_packets =
3693 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3694 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3695 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3696
3697 nstats->tx_packets =
3698 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3699 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3700 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3701
bb2a0f7a 3702 nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
a2fbb9ea 3703
0e39e645 3704 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 3705
bb2a0f7a
YG
3706 nstats->rx_dropped = old_tclient->checksum_discard +
3707 estats->mac_discard;
a2fbb9ea
ET
3708 nstats->tx_dropped = 0;
3709
3710 nstats->multicast =
3711 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3712
bb2a0f7a
YG
3713 nstats->collisions =
3714 estats->tx_stat_dot3statssinglecollisionframes_lo +
3715 estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3716 estats->tx_stat_dot3statslatecollisions_lo +
3717 estats->tx_stat_dot3statsexcessivecollisions_lo;
a2fbb9ea 3718
bb2a0f7a
YG
3719 estats->jabber_packets_received =
3720 old_tclient->packets_too_big_discard +
3721 estats->rx_stat_dot3statsframestoolong_lo;
3722
3723 nstats->rx_length_errors =
3724 estats->rx_stat_etherstatsundersizepkts_lo +
3725 estats->jabber_packets_received;
66e855f3 3726 nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
bb2a0f7a
YG
3727 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3728 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3729 nstats->rx_fifo_errors = old_tclient->no_buff_discard;
a2fbb9ea
ET
3730 nstats->rx_missed_errors = estats->xxoverflow_discard;
3731
3732 nstats->rx_errors = nstats->rx_length_errors +
3733 nstats->rx_over_errors +
3734 nstats->rx_crc_errors +
3735 nstats->rx_frame_errors +
0e39e645
ET
3736 nstats->rx_fifo_errors +
3737 nstats->rx_missed_errors;
a2fbb9ea 3738
bb2a0f7a
YG
3739 nstats->tx_aborted_errors =
3740 estats->tx_stat_dot3statslatecollisions_lo +
3741 estats->tx_stat_dot3statsexcessivecollisions_lo;
3742 nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
a2fbb9ea
ET
3743 nstats->tx_fifo_errors = 0;
3744 nstats->tx_heartbeat_errors = 0;
3745 nstats->tx_window_errors = 0;
3746
3747 nstats->tx_errors = nstats->tx_aborted_errors +
3748 nstats->tx_carrier_errors;
a2fbb9ea
ET
3749}
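/*
 * Editor's sketch (not the actual helper): bnx2x_hilo() above
 * presumably folds the adjacent {_hi, _lo} u32 counter fields into a
 * single value for struct net_device_stats, along these lines:
 */
#if 0 /* illustrative only */
static inline unsigned long hilo(u32 *hi_ref)
{
	u32 lo = *(hi_ref + 1);	/* assumption: _lo directly follows _hi */

	return (unsigned long)(((u64)*hi_ref << 32) | lo);
}
#endif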
3750
bb2a0f7a 3751static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 3752{
bb2a0f7a
YG
3753 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3754 int update = 0;
a2fbb9ea 3755
bb2a0f7a
YG
3756 if (*stats_comp != DMAE_COMP_VAL)
3757 return;
3758
3759 if (bp->port.pmf)
3760 update = (bnx2x_hw_stats_update(bp) == 0);
a2fbb9ea 3761
bb2a0f7a 3762 update |= (bnx2x_storm_stats_update(bp) == 0);
a2fbb9ea 3763
bb2a0f7a
YG
3764 if (update)
3765 bnx2x_net_stats_update(bp);
a2fbb9ea 3766
bb2a0f7a
YG
3767 else {
3768 if (bp->stats_pending) {
3769 bp->stats_pending++;
3770 if (bp->stats_pending == 3) {
3771 BNX2X_ERR("stats not updated for 3 times\n");
3772 bnx2x_panic();
3773 return;
3774 }
3775 }
a2fbb9ea
ET
3776 }
3777
3778 if (bp->msglevel & NETIF_MSG_TIMER) {
bb2a0f7a
YG
3779 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3780 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 3781 struct net_device_stats *nstats = &bp->dev->stats;
34f80b04 3782 int i;
a2fbb9ea
ET
3783
3784 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3785 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3786 " tx pkt (%lx)\n",
3787 bnx2x_tx_avail(bp->fp),
7a9b2557 3788 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
a2fbb9ea
ET
3789 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3790 " rx pkt (%lx)\n",
7a9b2557
VZ
3791 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3792 bp->fp->rx_comp_cons),
3793 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
a2fbb9ea 3794 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
6378c025 3795 netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
bb2a0f7a 3796 estats->driver_xoff, estats->brb_drop_lo);
a2fbb9ea
ET
3797 printk(KERN_DEBUG "tstats: checksum_discard %u "
3798 "packets_too_big_discard %u no_buff_discard %u "
3799 "mac_discard %u mac_filter_discard %u "
3800 "xxovrflow_discard %u brb_truncate_discard %u "
3801 "ttl0_discard %u\n",
bb2a0f7a
YG
3802 old_tclient->checksum_discard,
3803 old_tclient->packets_too_big_discard,
3804 old_tclient->no_buff_discard, estats->mac_discard,
a2fbb9ea 3805 estats->mac_filter_discard, estats->xxoverflow_discard,
bb2a0f7a
YG
3806 estats->brb_truncate_discard,
3807 old_tclient->ttl0_discard);
a2fbb9ea
ET
3808
3809 for_each_queue(bp, i) {
3810 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3811 bnx2x_fp(bp, i, tx_pkt),
3812 bnx2x_fp(bp, i, rx_pkt),
3813 bnx2x_fp(bp, i, rx_calls));
3814 }
3815 }
3816
bb2a0f7a
YG
3817 bnx2x_hw_stats_post(bp);
3818 bnx2x_storm_stats_post(bp);
3819}
a2fbb9ea 3820
bb2a0f7a
YG
3821static void bnx2x_port_stats_stop(struct bnx2x *bp)
3822{
3823 struct dmae_command *dmae;
3824 u32 opcode;
3825 int loader_idx = PMF_DMAE_C(bp);
3826 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3827
bb2a0f7a 3828 bp->executer_idx = 0;
a2fbb9ea 3829
bb2a0f7a
YG
3830 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3831 DMAE_CMD_C_ENABLE |
3832 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3833#ifdef __BIG_ENDIAN
bb2a0f7a 3834 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3835#else
bb2a0f7a 3836 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3837#endif
bb2a0f7a
YG
3838 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3839 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3840
3841 if (bp->port.port_stx) {
3842
3843 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3844 if (bp->func_stx)
3845 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3846 else
3847 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3848 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3849 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3850 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3851 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3852 dmae->len = sizeof(struct host_port_stats) >> 2;
3853 if (bp->func_stx) {
3854 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3855 dmae->comp_addr_hi = 0;
3856 dmae->comp_val = 1;
3857 } else {
3858 dmae->comp_addr_lo =
3859 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3860 dmae->comp_addr_hi =
3861 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3862 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3863
bb2a0f7a
YG
3864 *stats_comp = 0;
3865 }
a2fbb9ea
ET
3866 }
3867
bb2a0f7a
YG
3868 if (bp->func_stx) {
3869
3870 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3871 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3872 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3873 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3874 dmae->dst_addr_lo = bp->func_stx >> 2;
3875 dmae->dst_addr_hi = 0;
3876 dmae->len = sizeof(struct host_func_stats) >> 2;
3877 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3878 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3879 dmae->comp_val = DMAE_COMP_VAL;
3880
3881 *stats_comp = 0;
a2fbb9ea 3882 }
bb2a0f7a
YG
3883}
3884
3885static void bnx2x_stats_stop(struct bnx2x *bp)
3886{
3887 int update = 0;
3888
3889 bnx2x_stats_comp(bp);
3890
3891 if (bp->port.pmf)
3892 update = (bnx2x_hw_stats_update(bp) == 0);
3893
3894 update |= (bnx2x_storm_stats_update(bp) == 0);
3895
3896 if (update) {
3897 bnx2x_net_stats_update(bp);
a2fbb9ea 3898
bb2a0f7a
YG
3899 if (bp->port.pmf)
3900 bnx2x_port_stats_stop(bp);
3901
3902 bnx2x_hw_stats_post(bp);
3903 bnx2x_stats_comp(bp);
a2fbb9ea
ET
3904 }
3905}
3906
bb2a0f7a
YG
3907static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3908{
3909}
3910
3911static const struct {
3912 void (*action)(struct bnx2x *bp);
3913 enum bnx2x_stats_state next_state;
3914} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3915/* state event */
3916{
3917/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3918/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
3919/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3920/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3921},
3922{
3923/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
3924/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
3925/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
3926/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
3927}
3928};
3929
3930static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3931{
3932 enum bnx2x_stats_state state = bp->stats_state;
3933
3934 bnx2x_stats_stm[state][event].action(bp);
3935 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3936
3937 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3938 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3939 state, event, bp->stats_state);
3940}
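/*
 * Editor's worked example: with the table above, STATS_EVENT_STOP in
 * STATS_STATE_ENABLED runs bnx2x_stats_stop() and lands in
 * STATS_STATE_DISABLED, while STATS_EVENT_UPDATE in DISABLED runs
 * bnx2x_stats_do_nothing() and stays put.  Every (state, event) pair
 * is covered, so the handler needs no default or error path.
 */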
3941
a2fbb9ea
ET
3942static void bnx2x_timer(unsigned long data)
3943{
3944 struct bnx2x *bp = (struct bnx2x *) data;
3945
3946 if (!netif_running(bp->dev))
3947 return;
3948
3949 if (atomic_read(&bp->intr_sem) != 0)
f1410647 3950 goto timer_restart;
a2fbb9ea
ET
3951
3952 if (poll) {
3953 struct bnx2x_fastpath *fp = &bp->fp[0];
3954 int rc;
3955
3956 bnx2x_tx_int(fp, 1000);
3957 rc = bnx2x_rx_int(fp, 1000);
3958 }
3959
34f80b04
EG
3960 if (!BP_NOMCP(bp)) {
3961 int func = BP_FUNC(bp);
a2fbb9ea
ET
3962 u32 drv_pulse;
3963 u32 mcp_pulse;
3964
3965 ++bp->fw_drv_pulse_wr_seq;
3966 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3967 /* TBD - add SYSTEM_TIME */
3968 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 3969 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 3970
34f80b04 3971 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
a2fbb9ea
ET
3972 MCP_PULSE_SEQ_MASK);
3973 /* The delta between driver pulse and mcp response
3974 * should be 1 (before mcp response) or 0 (after mcp response)
3975 */
3976 if ((drv_pulse != mcp_pulse) &&
3977 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3978 /* someone lost a heartbeat... */
3979 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3980 drv_pulse, mcp_pulse);
3981 }
3982 }
3983
bb2a0f7a
YG
3984 if ((bp->state == BNX2X_STATE_OPEN) ||
3985 (bp->state == BNX2X_STATE_DISABLED))
3986 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 3987
f1410647 3988timer_restart:
a2fbb9ea
ET
3989 mod_timer(&bp->timer, jiffies + bp->current_interval);
3990}
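/*
 * Editor's sketch (hypothetical helper): the heartbeat test in
 * bnx2x_timer() above, in isolation.  Both sequence numbers live
 * modulo (MCP_PULSE_SEQ_MASK + 1); the link counts as healthy when the
 * driver is zero or one pulse ahead of the MCP:
 */
#if 0 /* illustrative only */
static bool drv_mcp_pulse_ok(u32 drv_pulse, u32 mcp_pulse, u32 seq_mask)
{
	return (drv_pulse == mcp_pulse) ||
	       (drv_pulse == ((mcp_pulse + 1) & seq_mask));
}
#endif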
3991
3992/* end of Statistics */
3993
3994/* nic init */
3995
3996/*
3997 * nic init service functions
3998 */
3999
34f80b04 4000static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 4001{
34f80b04
EG
4002 int port = BP_PORT(bp);
4003
4004 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4005 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
35302989 4006 sizeof(struct ustorm_status_block)/4);
34f80b04
EG
4007 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4008 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
35302989 4009 sizeof(struct cstorm_status_block)/4);
34f80b04
EG
4010}
4011
5c862848
EG
4012static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4013 dma_addr_t mapping, int sb_id)
34f80b04
EG
4014{
4015 int port = BP_PORT(bp);
bb2a0f7a 4016 int func = BP_FUNC(bp);
a2fbb9ea 4017 int index;
34f80b04 4018 u64 section;
a2fbb9ea
ET
4019
4020 /* USTORM */
4021 section = ((u64)mapping) + offsetof(struct host_status_block,
4022 u_status_block);
34f80b04 4023 sb->u_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4024
4025 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4026 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4027 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4028 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4029 U64_HI(section));
bb2a0f7a
YG
4030 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4031 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4032
4033 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4034 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4035 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
a2fbb9ea
ET
4036
4037 /* CSTORM */
4038 section = ((u64)mapping) + offsetof(struct host_status_block,
4039 c_status_block);
34f80b04 4040 sb->c_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4041
4042 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4043 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4044 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4045 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4046 U64_HI(section));
7a9b2557
VZ
4047 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4048 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4049
4050 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4051 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04
EG
4052 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4053
4054 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4055}
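/*
 * Editor's note: each status-block address above is a 64-bit DMA
 * address split across two 32-bit register writes; U64_LO/U64_HI are
 * presumably just the two halves:
 */
#if 0 /* illustrative only */
#define EX_U64_LO(x)	((u32)((u64)(x) & 0xffffffff))
#define EX_U64_HI(x)	((u32)((u64)(x) >> 32))
#endif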
4056
4057static void bnx2x_zero_def_sb(struct bnx2x *bp)
4058{
4059 int func = BP_FUNC(bp);
a2fbb9ea 4060
34f80b04
EG
4061 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4062 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4063 sizeof(struct ustorm_def_status_block)/4);
4064 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4065 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4066 sizeof(struct cstorm_def_status_block)/4);
4067 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4068 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4069 sizeof(struct xstorm_def_status_block)/4);
4070 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4071 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4072 sizeof(struct tstorm_def_status_block)/4);
a2fbb9ea
ET
4073}
4074
4075static void bnx2x_init_def_sb(struct bnx2x *bp,
4076 struct host_def_status_block *def_sb,
34f80b04 4077 dma_addr_t mapping, int sb_id)
a2fbb9ea 4078{
34f80b04
EG
4079 int port = BP_PORT(bp);
4080 int func = BP_FUNC(bp);
a2fbb9ea
ET
4081 int index, val, reg_offset;
4082 u64 section;
4083
4084 /* ATTN */
4085 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4086 atten_status_block);
34f80b04 4087 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 4088
49d66772
ET
4089 bp->attn_state = 0;
4090
a2fbb9ea
ET
4091 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4092 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4093
34f80b04 4094 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
a2fbb9ea
ET
4095 bp->attn_group[index].sig[0] = REG_RD(bp,
4096 reg_offset + 0x10*index);
4097 bp->attn_group[index].sig[1] = REG_RD(bp,
4098 reg_offset + 0x4 + 0x10*index);
4099 bp->attn_group[index].sig[2] = REG_RD(bp,
4100 reg_offset + 0x8 + 0x10*index);
4101 bp->attn_group[index].sig[3] = REG_RD(bp,
4102 reg_offset + 0xc + 0x10*index);
4103 }
4104
a2fbb9ea
ET
4105 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4106 HC_REG_ATTN_MSG0_ADDR_L);
4107
4108 REG_WR(bp, reg_offset, U64_LO(section));
4109 REG_WR(bp, reg_offset + 4, U64_HI(section));
4110
4111 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4112
4113 val = REG_RD(bp, reg_offset);
34f80b04 4114 val |= sb_id;
a2fbb9ea
ET
4115 REG_WR(bp, reg_offset, val);
4116
4117 /* USTORM */
4118 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4119 u_def_status_block);
34f80b04 4120 def_sb->u_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4121
4122 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4123 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4124 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4125 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4126 U64_HI(section));
5c862848 4127 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
34f80b04 4128 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4129
4130 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4131 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4132 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4133
4134 /* CSTORM */
4135 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4136 c_def_status_block);
34f80b04 4137 def_sb->c_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4138
4139 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4140 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4141 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4142 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4143 U64_HI(section));
5c862848 4144 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
34f80b04 4145 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4146
4147 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4148 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4149 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4150
4151 /* TSTORM */
4152 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4153 t_def_status_block);
34f80b04 4154 def_sb->t_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4155
4156 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4157 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4158 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4159 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4160 U64_HI(section));
5c862848 4161 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
34f80b04 4162 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4163
4164 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4165 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 4166 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4167
4168 /* XSTORM */
4169 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4170 x_def_status_block);
34f80b04 4171 def_sb->x_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4172
4173 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4174 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4175 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4176 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4177 U64_HI(section));
5c862848 4178 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
34f80b04 4179 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4180
4181 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4182 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 4183 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 4184
bb2a0f7a 4185 bp->stats_pending = 0;
66e855f3 4186 bp->set_mac_pending = 0;
bb2a0f7a 4187
34f80b04 4188 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
4189}
4190
4191static void bnx2x_update_coalesce(struct bnx2x *bp)
4192{
34f80b04 4193 int port = BP_PORT(bp);
a2fbb9ea
ET
4194 int i;
4195
4196 for_each_queue(bp, i) {
34f80b04 4197 int sb_id = bp->fp[i].sb_id;
a2fbb9ea
ET
4198
4199 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4200 REG_WR8(bp, BAR_USTRORM_INTMEM +
34f80b04 4201 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
5c862848 4202 U_SB_ETH_RX_CQ_INDEX),
34f80b04 4203 bp->rx_ticks/12);
a2fbb9ea 4204 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4205 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
5c862848
EG
4206 U_SB_ETH_RX_CQ_INDEX),
4207 bp->rx_ticks ? 0 : 1);
4208 REG_WR16(bp, BAR_USTRORM_INTMEM +
4209 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4210 U_SB_ETH_RX_BD_INDEX),
34f80b04 4211 bp->rx_ticks ? 0 : 1);
a2fbb9ea
ET
4212
4213 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4214 REG_WR8(bp, BAR_CSTRORM_INTMEM +
34f80b04 4215 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
5c862848 4216 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4217 bp->tx_ticks/12);
a2fbb9ea 4218 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4219 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
5c862848 4220 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4221 bp->tx_ticks ? 0 : 1);
a2fbb9ea
ET
4222 }
4223}
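/*
 * Editor's worked example (inferred from the /12): the HC timeout
 * fields above appear to be programmed in 12-us units, so rx_ticks =
 * 48 writes 48/12 = 4.  A tick value of 0 writes 1 to the matching
 * HC_DISABLE field instead (the "? 0 : 1"), presumably turning the
 * coalescing timer off for that index.
 */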
4224
7a9b2557
VZ
4225static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4226 struct bnx2x_fastpath *fp, int last)
4227{
4228 int i;
4229
4230 for (i = 0; i < last; i++) {
4231 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4232 struct sk_buff *skb = rx_buf->skb;
4233
4234 if (skb == NULL) {
4235 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4236 continue;
4237 }
4238
4239 if (fp->tpa_state[i] == BNX2X_TPA_START)
4240 pci_unmap_single(bp->pdev,
4241 pci_unmap_addr(rx_buf, mapping),
437cf2f1 4242 bp->rx_buf_size,
7a9b2557
VZ
4243 PCI_DMA_FROMDEVICE);
4244
4245 dev_kfree_skb(skb);
4246 rx_buf->skb = NULL;
4247 }
4248}
4249
a2fbb9ea
ET
4250static void bnx2x_init_rx_rings(struct bnx2x *bp)
4251{
7a9b2557 4252 int func = BP_FUNC(bp);
32626230
EG
4253 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4254 ETH_MAX_AGGREGATION_QUEUES_E1H;
4255 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 4256 int i, j;
a2fbb9ea 4257
437cf2f1
EG
4258 bp->rx_buf_size = bp->dev->mtu;
4259 bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
4260 BCM_RX_ETH_PAYLOAD_ALIGN;
a2fbb9ea 4261
7a9b2557
VZ
4262 if (bp->flags & TPA_ENABLE_FLAG) {
4263 DP(NETIF_MSG_IFUP,
437cf2f1
EG
4264 "rx_buf_size %d effective_mtu %d\n",
4265 bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);
7a9b2557
VZ
4266
4267 for_each_queue(bp, j) {
32626230 4268 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 4269
32626230 4270 for (i = 0; i < max_agg_queues; i++) {
7a9b2557
VZ
4271 fp->tpa_pool[i].skb =
4272 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4273 if (!fp->tpa_pool[i].skb) {
4274 BNX2X_ERR("Failed to allocate TPA "
4275 "skb pool for queue[%d] - "
4276 "disabling TPA on this "
4277 "queue!\n", j);
4278 bnx2x_free_tpa_pool(bp, fp, i);
4279 fp->disable_tpa = 1;
4280 break;
4281 }
 4282 /* note: fixed from &bp->fp->tpa_pool[i], which always hit queue 0 */
 4283 pci_unmap_addr_set(&fp->tpa_pool[i],
 4284 mapping, 0);
4285 fp->tpa_state[i] = BNX2X_TPA_STOP;
4286 }
4287 }
4288 }
4289
a2fbb9ea
ET
4290 for_each_queue(bp, j) {
4291 struct bnx2x_fastpath *fp = &bp->fp[j];
4292
4293 fp->rx_bd_cons = 0;
4294 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
7a9b2557
VZ
4295 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4296
4297 /* "next page" elements initialization */
4298 /* SGE ring */
4299 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4300 struct eth_rx_sge *sge;
4301
4302 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4303 sge->addr_hi =
4304 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4305 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4306 sge->addr_lo =
4307 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4308 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4309 }
4310
4311 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 4312
7a9b2557 4313 /* RX BD ring */
a2fbb9ea
ET
4314 for (i = 1; i <= NUM_RX_RINGS; i++) {
4315 struct eth_rx_bd *rx_bd;
4316
4317 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4318 rx_bd->addr_hi =
4319 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 4320 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4321 rx_bd->addr_lo =
4322 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 4323 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4324 }
4325
34f80b04 4326 /* CQ ring */
a2fbb9ea
ET
4327 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4328 struct eth_rx_cqe_next_page *nextpg;
4329
4330 nextpg = (struct eth_rx_cqe_next_page *)
4331 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4332 nextpg->addr_hi =
4333 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 4334 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4335 nextpg->addr_lo =
4336 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 4337 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4338 }
4339
7a9b2557
VZ
4340 /* Allocate SGEs and initialize the ring elements */
4341 for (i = 0, ring_prod = 0;
4342 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 4343
7a9b2557
VZ
4344 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4345 BNX2X_ERR("was only able to allocate "
4346 "%d rx sges\n", i);
4347 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4348 /* Cleanup already allocated elements */
4349 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
32626230 4350 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
7a9b2557
VZ
4351 fp->disable_tpa = 1;
4352 ring_prod = 0;
4353 break;
4354 }
4355 ring_prod = NEXT_SGE_IDX(ring_prod);
4356 }
4357 fp->rx_sge_prod = ring_prod;
4358
4359 /* Allocate BDs and initialize BD ring */
66e855f3 4360 fp->rx_comp_cons = 0;
7a9b2557 4361 cqe_ring_prod = ring_prod = 0;
a2fbb9ea
ET
4362 for (i = 0; i < bp->rx_ring_size; i++) {
4363 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4364 BNX2X_ERR("was only able to allocate "
4365 "%d rx skbs\n", i);
66e855f3 4366 bp->eth_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
4367 break;
4368 }
4369 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 4370 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 4371 WARN_ON(ring_prod <= i);
a2fbb9ea
ET
4372 }
4373
7a9b2557
VZ
4374 fp->rx_bd_prod = ring_prod;
4375 /* must not have more available CQEs than BDs */
4376 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4377 cqe_ring_prod);
a2fbb9ea
ET
4378 fp->rx_pkt = fp->rx_calls = 0;
4379
7a9b2557
VZ
 4380 /* Warning!
 4381 * This will generate an interrupt (to the TSTORM);
 4382 * it must only be done after the chip is initialized.
 4383 */
4384 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4385 fp->rx_sge_prod);
a2fbb9ea
ET
4386 if (j != 0)
4387 continue;
4388
4389 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4390 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
a2fbb9ea
ET
4391 U64_LO(fp->rx_comp_mapping));
4392 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4393 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
a2fbb9ea
ET
4394 U64_HI(fp->rx_comp_mapping));
4395 }
4396}
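/*
 * Editor's sketch (hypothetical names): the "next page" loops above
 * all follow one pattern -- the trailing element(s) of each ring page
 * hold the DMA address of the following page, and the last page points
 * back to page 0 via the "i % n_pages", closing the ring:
 */
#if 0 /* illustrative only */
static void chain_ring_pages(u64 *page_link, dma_addr_t base,
			     int n_pages, u32 page_size)
{
	int i;

	for (i = 1; i <= n_pages; i++)
		page_link[i - 1] = base + (u64)page_size * (i % n_pages);
}
#endif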
4397
4398static void bnx2x_init_tx_ring(struct bnx2x *bp)
4399{
4400 int i, j;
4401
4402 for_each_queue(bp, j) {
4403 struct bnx2x_fastpath *fp = &bp->fp[j];
4404
4405 for (i = 1; i <= NUM_TX_RINGS; i++) {
4406 struct eth_tx_bd *tx_bd =
4407 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4408
4409 tx_bd->addr_hi =
4410 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
34f80b04 4411 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
4412 tx_bd->addr_lo =
4413 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
34f80b04 4414 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
4415 }
4416
4417 fp->tx_pkt_prod = 0;
4418 fp->tx_pkt_cons = 0;
4419 fp->tx_bd_prod = 0;
4420 fp->tx_bd_cons = 0;
4421 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4422 fp->tx_pkt = 0;
4423 }
4424}
4425
4426static void bnx2x_init_sp_ring(struct bnx2x *bp)
4427{
34f80b04 4428 int func = BP_FUNC(bp);
a2fbb9ea
ET
4429
4430 spin_lock_init(&bp->spq_lock);
4431
4432 bp->spq_left = MAX_SPQ_PENDING;
4433 bp->spq_prod_idx = 0;
a2fbb9ea
ET
4434 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4435 bp->spq_prod_bd = bp->spq;
4436 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4437
34f80b04 4438 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 4439 U64_LO(bp->spq_mapping));
34f80b04
EG
4440 REG_WR(bp,
4441 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
a2fbb9ea
ET
4442 U64_HI(bp->spq_mapping));
4443
34f80b04 4444 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
4445 bp->spq_prod_idx);
4446}
4447
4448static void bnx2x_init_context(struct bnx2x *bp)
4449{
4450 int i;
4451
4452 for_each_queue(bp, i) {
4453 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4454 struct bnx2x_fastpath *fp = &bp->fp[i];
34f80b04 4455 u8 sb_id = FP_SB_ID(fp);
a2fbb9ea
ET
4456
4457 context->xstorm_st_context.tx_bd_page_base_hi =
4458 U64_HI(fp->tx_desc_mapping);
4459 context->xstorm_st_context.tx_bd_page_base_lo =
4460 U64_LO(fp->tx_desc_mapping);
4461 context->xstorm_st_context.db_data_addr_hi =
4462 U64_HI(fp->tx_prods_mapping);
4463 context->xstorm_st_context.db_data_addr_lo =
4464 U64_LO(fp->tx_prods_mapping);
34f80b04
EG
4465 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4466 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4467
4468 context->ustorm_st_context.common.sb_index_numbers =
4469 BNX2X_RX_SB_INDEX_NUM;
4470 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4471 context->ustorm_st_context.common.status_block_id = sb_id;
4472 context->ustorm_st_context.common.flags =
4473 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
437cf2f1
EG
4474 context->ustorm_st_context.common.mc_alignment_size =
4475 BCM_RX_ETH_PAYLOAD_ALIGN;
34f80b04 4476 context->ustorm_st_context.common.bd_buff_size =
437cf2f1 4477 bp->rx_buf_size;
34f80b04 4478 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 4479 U64_HI(fp->rx_desc_mapping);
34f80b04 4480 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 4481 U64_LO(fp->rx_desc_mapping);
7a9b2557
VZ
4482 if (!fp->disable_tpa) {
4483 context->ustorm_st_context.common.flags |=
4484 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4485 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4486 context->ustorm_st_context.common.sge_buff_size =
4487 (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4488 context->ustorm_st_context.common.sge_page_base_hi =
4489 U64_HI(fp->rx_sge_mapping);
4490 context->ustorm_st_context.common.sge_page_base_lo =
4491 U64_LO(fp->rx_sge_mapping);
4492 }
4493
a2fbb9ea 4494 context->cstorm_st_context.sb_index_number =
5c862848 4495 C_SB_ETH_TX_CQ_INDEX;
34f80b04 4496 context->cstorm_st_context.status_block_id = sb_id;
a2fbb9ea
ET
4497
4498 context->xstorm_ag_context.cdu_reserved =
4499 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4500 CDU_REGION_NUMBER_XCM_AG,
4501 ETH_CONNECTION_TYPE);
4502 context->ustorm_ag_context.cdu_usage =
4503 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4504 CDU_REGION_NUMBER_UCM_AG,
4505 ETH_CONNECTION_TYPE);
4506 }
4507}
4508
4509static void bnx2x_init_ind_table(struct bnx2x *bp)
4510{
34f80b04 4511 int port = BP_PORT(bp);
a2fbb9ea
ET
4512 int i;
4513
4514 if (!is_multi(bp))
4515 return;
4516
34f80b04 4517 DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
a2fbb9ea 4518 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04
EG
4519 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4520 TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
a2fbb9ea
ET
4521 i % bp->num_queues);
4522
4523 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4524}
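/*
 * Editor's worked example: with num_queues = 4 the loop above fills
 * the indirection table with the repeating pattern 0,1,2,3,0,1,2,3...;
 * the RSS hash of an incoming flow indexes this table to pick its Rx
 * queue, spreading flows round-robin across the queues.
 */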
4525
49d66772
ET
4526static void bnx2x_set_client_config(struct bnx2x *bp)
4527{
49d66772 4528 struct tstorm_eth_client_config tstorm_client = {0};
34f80b04
EG
4529 int port = BP_PORT(bp);
4530 int i;
49d66772 4531
34f80b04 4532 tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
66e855f3 4533 tstorm_client.statistics_counter_id = BP_CL_ID(bp);
49d66772
ET
4534 tstorm_client.config_flags =
4535 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4536#ifdef BCM_VLAN
34f80b04 4537 if (bp->rx_mode && bp->vlgrp) {
49d66772
ET
4538 tstorm_client.config_flags |=
4539 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4540 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4541 }
4542#endif
49d66772 4543
7a9b2557
VZ
4544 if (bp->flags & TPA_ENABLE_FLAG) {
4545 tstorm_client.max_sges_for_packet =
4f40f2cb 4546 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
7a9b2557
VZ
4547 tstorm_client.max_sges_for_packet =
4548 ((tstorm_client.max_sges_for_packet +
4549 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4550 PAGES_PER_SGE_SHIFT;
4551
4552 tstorm_client.config_flags |=
4553 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4554 }
4555
49d66772
ET
4556 for_each_queue(bp, i) {
4557 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4558 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
49d66772
ET
4559 ((u32 *)&tstorm_client)[0]);
4560 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4561 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
49d66772
ET
4562 ((u32 *)&tstorm_client)[1]);
4563 }
4564
34f80b04
EG
4565 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4566 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
49d66772
ET
4567}
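/*
 * Editor's worked example (assuming 4K SGE pages and PAGES_PER_SGE =
 * 2): for an effective MTU around 9000 bytes the first assignment
 * above gives SGE_PAGE_ALIGN(mtu) >> SGE_PAGE_SHIFT = 3 pages, and the
 * round-up to whole PAGES_PER_SGE groups yields (3 + 1) & ~1 = 4 pages
 * = 2 SGEs per packet.
 */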
4568
a2fbb9ea
ET
4569static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4570{
a2fbb9ea 4571 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
34f80b04
EG
4572 int mode = bp->rx_mode;
4573 int mask = (1 << BP_L_ID(bp));
4574 int func = BP_FUNC(bp);
a2fbb9ea
ET
4575 int i;
4576
3196a88a 4577 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
a2fbb9ea
ET
4578
4579 switch (mode) {
4580 case BNX2X_RX_MODE_NONE: /* no Rx */
34f80b04
EG
4581 tstorm_mac_filter.ucast_drop_all = mask;
4582 tstorm_mac_filter.mcast_drop_all = mask;
4583 tstorm_mac_filter.bcast_drop_all = mask;
a2fbb9ea
ET
4584 break;
4585 case BNX2X_RX_MODE_NORMAL:
34f80b04 4586 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea
ET
4587 break;
4588 case BNX2X_RX_MODE_ALLMULTI:
34f80b04
EG
4589 tstorm_mac_filter.mcast_accept_all = mask;
4590 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea
ET
4591 break;
4592 case BNX2X_RX_MODE_PROMISC:
34f80b04
EG
4593 tstorm_mac_filter.ucast_accept_all = mask;
4594 tstorm_mac_filter.mcast_accept_all = mask;
4595 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea
ET
4596 break;
4597 default:
34f80b04
EG
4598 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4599 break;
a2fbb9ea
ET
4600 }
4601
4602 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4603 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4604 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
a2fbb9ea
ET
4605 ((u32 *)&tstorm_mac_filter)[i]);
4606
34f80b04 4607/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
a2fbb9ea
ET
4608 ((u32 *)&tstorm_mac_filter)[i]); */
4609 }
a2fbb9ea 4610
49d66772
ET
4611 if (mode != BNX2X_RX_MODE_NONE)
4612 bnx2x_set_client_config(bp);
a2fbb9ea
ET
4613}
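/*
 * Editor's summary of the switch above: each Rx mode maps to a set of
 * per-function filter bits (mask = 1 << BP_L_ID(bp)):
 *
 *   NONE      drop_all:   ucast + mcast + bcast
 *   NORMAL    accept_all: bcast (ucast/mcast still pass the MAC filters)
 *   ALLMULTI  accept_all: mcast + bcast
 *   PROMISC   accept_all: ucast + mcast + bcast
 *
 * The struct is then copied word by word into the TSTORM memory below.
 */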
4614
471de716
EG
4615static void bnx2x_init_internal_common(struct bnx2x *bp)
4616{
4617 int i;
4618
3cdf1db7
YG
4619 if (bp->flags & TPA_ENABLE_FLAG) {
4620 struct tstorm_eth_tpa_exist tpa = {0};
4621
4622 tpa.tpa_exist = 1;
4623
4624 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4625 ((u32 *)&tpa)[0]);
4626 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4627 ((u32 *)&tpa)[1]);
4628 }
4629
471de716
EG
4630 /* Zero this manually as its initialization is
4631 currently missing in the initTool */
4632 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4633 REG_WR(bp, BAR_USTRORM_INTMEM +
4634 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4635}
4636
4637static void bnx2x_init_internal_port(struct bnx2x *bp)
4638{
4639 int port = BP_PORT(bp);
4640
4641 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4642 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4643 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4644 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4645}
4646
4647static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 4648{
a2fbb9ea
ET
4649 struct tstorm_eth_function_common_config tstorm_config = {0};
4650 struct stats_indication_flags stats_flags = {0};
34f80b04
EG
4651 int port = BP_PORT(bp);
4652 int func = BP_FUNC(bp);
4653 int i;
471de716 4654 u16 max_agg_size;
a2fbb9ea
ET
4655
4656 if (is_multi(bp)) {
4657 tstorm_config.config_flags = MULTI_FLAGS;
4658 tstorm_config.rss_result_mask = MULTI_MASK;
4659 }
4660
34f80b04
EG
4661 tstorm_config.leading_client_id = BP_L_ID(bp);
4662
a2fbb9ea 4663 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4664 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
a2fbb9ea
ET
4665 (*(u32 *)&tstorm_config));
4666
c14423fe 4667 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
a2fbb9ea
ET
4668 bnx2x_set_storm_rx_mode(bp);
4669
66e855f3
YG
4670 /* reset xstorm per client statistics */
4671 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4672 REG_WR(bp, BAR_XSTRORM_INTMEM +
4673 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4674 i*4, 0);
4675 }
4676 /* reset tstorm per client statistics */
4677 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4678 REG_WR(bp, BAR_TSTRORM_INTMEM +
4679 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4680 i*4, 0);
4681 }
4682
4683 /* Init statistics related context */
34f80b04 4684 stats_flags.collect_eth = 1;
a2fbb9ea 4685
66e855f3 4686 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4687 ((u32 *)&stats_flags)[0]);
66e855f3 4688 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4689 ((u32 *)&stats_flags)[1]);
4690
66e855f3 4691 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4692 ((u32 *)&stats_flags)[0]);
66e855f3 4693 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4694 ((u32 *)&stats_flags)[1]);
4695
66e855f3 4696 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4697 ((u32 *)&stats_flags)[0]);
66e855f3 4698 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4699 ((u32 *)&stats_flags)[1]);
4700
66e855f3
YG
4701 REG_WR(bp, BAR_XSTRORM_INTMEM +
4702 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4703 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4704 REG_WR(bp, BAR_XSTRORM_INTMEM +
4705 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4706 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4707
4708 REG_WR(bp, BAR_TSTRORM_INTMEM +
4709 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4710 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4711 REG_WR(bp, BAR_TSTRORM_INTMEM +
4712 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4713 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04
EG
4714
4715 if (CHIP_IS_E1H(bp)) {
4716 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4717 IS_E1HMF(bp));
4718 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4719 IS_E1HMF(bp));
4720 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4721 IS_E1HMF(bp));
4722 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4723 IS_E1HMF(bp));
4724
7a9b2557
VZ
4725 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4726 bp->e1hov);
34f80b04
EG
4727 }
4728
4f40f2cb
EG
4729 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
4730 max_agg_size =
4731 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4732 SGE_PAGE_SIZE * PAGES_PER_SGE),
4733 (u32)0xffff);
7a9b2557
VZ
4734 for_each_queue(bp, i) {
4735 struct bnx2x_fastpath *fp = &bp->fp[i];
7a9b2557
VZ
4736
4737 REG_WR(bp, BAR_USTRORM_INTMEM +
4738 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4739 U64_LO(fp->rx_comp_mapping));
4740 REG_WR(bp, BAR_USTRORM_INTMEM +
4741 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4742 U64_HI(fp->rx_comp_mapping));
4743
7a9b2557
VZ
4744 REG_WR16(bp, BAR_USTRORM_INTMEM +
4745 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4746 max_agg_size);
4747 }
a2fbb9ea
ET
4748}
4749
471de716
EG
4750static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4751{
4752 switch (load_code) {
4753 case FW_MSG_CODE_DRV_LOAD_COMMON:
4754 bnx2x_init_internal_common(bp);
4755 /* no break */
4756
4757 case FW_MSG_CODE_DRV_LOAD_PORT:
4758 bnx2x_init_internal_port(bp);
4759 /* no break */
4760
4761 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4762 bnx2x_init_internal_func(bp);
4763 break;
4764
4765 default:
4766 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4767 break;
4768 }
4769}
4770
4771static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
4772{
4773 int i;
4774
4775 for_each_queue(bp, i) {
4776 struct bnx2x_fastpath *fp = &bp->fp[i];
4777
34f80b04 4778 fp->bp = bp;
a2fbb9ea 4779 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 4780 fp->index = i;
34f80b04
EG
4781 fp->cl_id = BP_L_ID(bp) + i;
4782 fp->sb_id = fp->cl_id;
4783 DP(NETIF_MSG_IFUP,
4784 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
4785 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
5c862848
EG
4786 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4787 FP_SB_ID(fp));
4788 bnx2x_update_fpsb_idx(fp);
a2fbb9ea
ET
4789 }
4790
5c862848
EG
4791 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4792 DEF_SB_ID);
4793 bnx2x_update_dsb_idx(bp);
a2fbb9ea
ET
4794 bnx2x_update_coalesce(bp);
4795 bnx2x_init_rx_rings(bp);
4796 bnx2x_init_tx_ring(bp);
4797 bnx2x_init_sp_ring(bp);
4798 bnx2x_init_context(bp);
471de716 4799 bnx2x_init_internal(bp, load_code);
a2fbb9ea 4800 bnx2x_init_ind_table(bp);
615f8fd9 4801 bnx2x_int_enable(bp);
a2fbb9ea
ET
4802}
4803
4804/* end of nic init */
4805
4806/*
4807 * gzip service functions
4808 */
4809
4810static int bnx2x_gunzip_init(struct bnx2x *bp)
4811{
4812 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4813 &bp->gunzip_mapping);
4814 if (bp->gunzip_buf == NULL)
4815 goto gunzip_nomem1;
4816
4817 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4818 if (bp->strm == NULL)
4819 goto gunzip_nomem2;
4820
4821 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4822 GFP_KERNEL);
4823 if (bp->strm->workspace == NULL)
4824 goto gunzip_nomem3;
4825
4826 return 0;
4827
4828gunzip_nomem3:
4829 kfree(bp->strm);
4830 bp->strm = NULL;
4831
4832gunzip_nomem2:
4833 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4834 bp->gunzip_mapping);
4835 bp->gunzip_buf = NULL;
4836
4837gunzip_nomem1:
4838 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 4839 " un-compression\n", bp->dev->name);
a2fbb9ea
ET
4840 return -ENOMEM;
4841}
4842
4843static void bnx2x_gunzip_end(struct bnx2x *bp)
4844{
4845 kfree(bp->strm->workspace);
4846
4847 kfree(bp->strm);
4848 bp->strm = NULL;
4849
4850 if (bp->gunzip_buf) {
4851 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4852 bp->gunzip_mapping);
4853 bp->gunzip_buf = NULL;
4854 }
4855}
4856
4857static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4858{
4859 int n, rc;
4860
4861 /* check gzip header */
4862 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4863 return -EINVAL;
4864
4865 n = 10;
4866
34f80b04 4867#define FNAME 0x8
a2fbb9ea
ET
4868
4869 if (zbuf[3] & FNAME)
4870 while ((zbuf[n++] != 0) && (n < len));
4871
4872 bp->strm->next_in = zbuf + n;
4873 bp->strm->avail_in = len - n;
4874 bp->strm->next_out = bp->gunzip_buf;
4875 bp->strm->avail_out = FW_BUF_SIZE;
4876
4877 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4878 if (rc != Z_OK)
4879 return rc;
4880
4881 rc = zlib_inflate(bp->strm, Z_FINISH);
4882 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4883 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4884 bp->dev->name, bp->strm->msg);
4885
4886 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4887 if (bp->gunzip_outlen & 0x3)
4888 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4889 " gunzip_outlen (%d) not aligned\n",
4890 bp->dev->name, bp->gunzip_outlen);
4891 bp->gunzip_outlen >>= 2;
4892
4893 zlib_inflateEnd(bp->strm);
4894
4895 if (rc == Z_STREAM_END)
4896 return 0;
4897
4898 return rc;
4899}
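/*
 * Editor's note: the parser above hard-codes the 10-byte gzip member
 * header (RFC 1952) and handles only the FNAME flag: when header byte
 * 3 has bit 3 set, the NUL-terminated original file name is skipped
 * before the raw deflate data, which is then inflated with
 * windowBits = -MAX_WBITS (raw deflate, no zlib wrapper).  Layout for
 * reference:
 */
#if 0 /* illustrative only */
static const u8 gzip_hdr_example[10] = {
	0x1f, 0x8b,	/* magic */
	0x08,		/* compression method: deflate (Z_DEFLATED) */
	0x08,		/* flags: FNAME set */
	0, 0, 0, 0,	/* mtime */
	0,		/* extra flags */
	0x03		/* OS: Unix */
};
#endif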
4900
4901/* nic load/unload */
4902
4903/*
34f80b04 4904 * General service functions
a2fbb9ea
ET
4905 */
4906
4907/* send a NIG loopback debug packet */
4908static void bnx2x_lb_pckt(struct bnx2x *bp)
4909{
a2fbb9ea 4910 u32 wb_write[3];
a2fbb9ea
ET
4911
4912 /* Ethernet source and destination addresses */
a2fbb9ea
ET
4913 wb_write[0] = 0x55555555;
4914 wb_write[1] = 0x55555555;
34f80b04 4915 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 4916 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
4917
4918 /* NON-IP protocol */
a2fbb9ea
ET
4919 wb_write[0] = 0x09000000;
4920 wb_write[1] = 0x55555555;
34f80b04 4921 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 4922 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
4923}
4924
 4925/* Some of the internal memories are not directly readable
 4926 * from the driver, so to test them we send debug packets
 4927 * through the loopback path
 4928 */
4929static int bnx2x_int_mem_test(struct bnx2x *bp)
4930{
4931 int factor;
4932 int count, i;
4933 u32 val = 0;
4934
ad8d3948 4935 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 4936 factor = 120;
ad8d3948
EG
4937 else if (CHIP_REV_IS_EMUL(bp))
4938 factor = 200;
4939 else
a2fbb9ea 4940 factor = 1;
a2fbb9ea
ET
4941
4942 DP(NETIF_MSG_HW, "start part1\n");
4943
4944 /* Disable inputs of parser neighbor blocks */
4945 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4946 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4947 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 4948 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
4949
4950 /* Write 0 to parser credits for CFC search request */
4951 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4952
4953 /* send Ethernet packet */
4954 bnx2x_lb_pckt(bp);
4955
 4956 /* TODO: should the NIG statistics be reset here? */
4957 /* Wait until NIG register shows 1 packet of size 0x10 */
4958 count = 1000 * factor;
4959 while (count) {
34f80b04 4960
a2fbb9ea
ET
4961 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4962 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
4963 if (val == 0x10)
4964 break;
4965
4966 msleep(10);
4967 count--;
4968 }
4969 if (val != 0x10) {
4970 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4971 return -1;
4972 }
4973
4974 /* Wait until PRS register shows 1 packet */
4975 count = 1000 * factor;
4976 while (count) {
4977 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
4978 if (val == 1)
4979 break;
4980
4981 msleep(10);
4982 count--;
4983 }
4984 if (val != 0x1) {
4985 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4986 return -2;
4987 }
4988
4989 /* Reset and init BRB, PRS */
34f80b04 4990 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 4991 msleep(50);
34f80b04 4992 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea
ET
4993 msleep(50);
4994 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
4995 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
4996
4997 DP(NETIF_MSG_HW, "part2\n");
4998
4999 /* Disable inputs of parser neighbor blocks */
5000 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5001 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5002 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5003 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5004
5005 /* Write 0 to parser credits for CFC search request */
5006 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5007
5008 /* send 10 Ethernet packets */
5009 for (i = 0; i < 10; i++)
5010 bnx2x_lb_pckt(bp);
5011
5012 /* Wait until NIG register shows 10 + 1
5013 packets of size 11*0x10 = 0xb0 */
5014 count = 1000 * factor;
5015 while (count) {
34f80b04 5016
a2fbb9ea
ET
5017 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5018 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5019 if (val == 0xb0)
5020 break;
5021
5022 msleep(10);
5023 count--;
5024 }
5025 if (val != 0xb0) {
5026 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5027 return -3;
5028 }
5029
5030 /* Wait until PRS register shows 2 packets */
5031 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5032 if (val != 2)
5033 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5034
5035 /* Write 1 to parser credits for CFC search request */
5036 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5037
5038 /* Wait until PRS register shows 3 packets */
5039 msleep(10 * factor);
5041 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5042 if (val != 3)
5043 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5044
5045 /* clear NIG EOP FIFO */
5046 for (i = 0; i < 11; i++)
5047 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5048 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5049 if (val != 1) {
5050 BNX2X_ERR("clear of NIG failed\n");
5051 return -4;
5052 }
5053
5054 /* Reset and init BRB, PRS, NIG */
5055 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5056 msleep(50);
5057 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5058 msleep(50);
5059 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5060 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5061#ifndef BCM_ISCSI
5062 /* set NIC mode */
5063 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5064#endif
5065
5066 /* Enable inputs of parser neighbor blocks */
5067 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5068 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5069 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5070 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5071
5072 DP(NETIF_MSG_HW, "done\n");
5073
5074 return 0; /* OK */
5075}
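/* Summary of the self-test above, as the code reads (an interpretive
 * note, not from the original sources): stage 1 sends one loopback
 * packet with the parser's neighbour blocks disabled and expects the
 * NIG octet counter to read 0x10 and the PRS packet counter to read 1;
 * stage 2 sends 10 more packets (11 * 0x10 = 0xb0 octets expected),
 * checks that the PRS holds 2 packets, releases one CFC search credit
 * and expects a third packet to drain, then resets the blocks and
 * re-enables their inputs.
 */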
5076
5077static void enable_blocks_attention(struct bnx2x *bp)
5078{
5079 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5080 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5081 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5082 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5083 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5084 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5085 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5086 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5087 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5088/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5089/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5090 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5091 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5092 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5093/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5094/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5095 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5096 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5097 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5098 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5099/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5100/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5101 if (CHIP_REV_IS_FPGA(bp))
5102 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5103 else
5104 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5105 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5106 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5107 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5108/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5109/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5110 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5111 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5112/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5113 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
5114}
5115
5116
5117static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5118{
a2fbb9ea 5119 u32 val, i;
a2fbb9ea 5120
34f80b04 5121 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5122
5123 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5124 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5125
5126 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5127 if (CHIP_IS_E1H(bp))
5128 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5129
5130 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5131 msleep(30);
5132 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5133
5134 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5135 if (CHIP_IS_E1(bp)) {
5136 /* enable HW interrupt from PXP on USDM overflow
5137 bit 16 on INT_MASK_0 */
5138 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5139 }
a2fbb9ea 5140
5141 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5142 bnx2x_init_pxp(bp);
5143
5144#ifdef __BIG_ENDIAN
5145 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5146 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5147 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5148 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5149 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5150 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
5151
5152/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5153 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5154 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5155 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5156 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5157#endif
5158
34f80b04 5159 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 5160#ifdef BCM_ISCSI
5161 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5162 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5163 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5164#endif
5165
5166 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5167 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5168
5169 /* let the HW do its magic ... */
5170 msleep(100);
5171 /* finish PXP init */
5172 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5173 if (val != 1) {
5174 BNX2X_ERR("PXP2 CFG failed\n");
5175 return -EBUSY;
5176 }
5177 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5178 if (val != 1) {
5179 BNX2X_ERR("PXP2 RD_INIT failed\n");
5180 return -EBUSY;
5181 }
a2fbb9ea 5182
5183 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5184 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5185
34f80b04 5186 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
a2fbb9ea 5187
5188 /* clean the DMAE memory */
5189 bp->dmae_ready = 1;
5190 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5191
5192 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5193 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5194 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5195 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
a2fbb9ea 5196
5197 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5198 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5199 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5200 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5201
5202 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5203 /* soft reset pulse */
5204 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5205 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5206
5207#ifdef BCM_ISCSI
34f80b04 5208 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
a2fbb9ea 5209#endif
a2fbb9ea 5210
5211 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5212 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5213 if (!CHIP_REV_IS_SLOW(bp)) {
5214 /* enable hw interrupt from doorbell Q */
5215 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5216 }
a2fbb9ea 5217
5218 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5219 if (CHIP_REV_IS_SLOW(bp)) {
5220 /* fix for emulation and FPGA so that no pause is generated */
5221 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5222 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5223 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5224 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5225 }
a2fbb9ea 5226
34f80b04 5227 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5228 /* set NIC mode */
5229 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5230 if (CHIP_IS_E1H(bp))
5231 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 5232
5233 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5234 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5235 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5236 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
a2fbb9ea 5237
5238 if (CHIP_IS_E1H(bp)) {
5239 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5240 STORM_INTMEM_SIZE_E1H/2);
5241 bnx2x_init_fill(bp,
5242 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5243 0, STORM_INTMEM_SIZE_E1H/2);
5244 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5245 STORM_INTMEM_SIZE_E1H/2);
5246 bnx2x_init_fill(bp,
5247 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5248 0, STORM_INTMEM_SIZE_E1H/2);
5249 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5250 STORM_INTMEM_SIZE_E1H/2);
5251 bnx2x_init_fill(bp,
5252 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5253 0, STORM_INTMEM_SIZE_E1H/2);
5254 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5255 STORM_INTMEM_SIZE_E1H/2);
5256 bnx2x_init_fill(bp,
5257 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5258 0, STORM_INTMEM_SIZE_E1H/2);
5259 } else { /* E1 */
5260 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5261 STORM_INTMEM_SIZE_E1);
5262 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5263 STORM_INTMEM_SIZE_E1);
5264 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5265 STORM_INTMEM_SIZE_E1);
5266 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5267 STORM_INTMEM_SIZE_E1);
34f80b04 5268 }
a2fbb9ea 5269
5270 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5271 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5272 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5273 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
a2fbb9ea 5274
5275 /* sync semi rtc */
5276 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5277 0x80000000);
5278 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5279 0x80000000);
a2fbb9ea 5280
5281 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5282 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5283 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
a2fbb9ea 5284
5285 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5286 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5287 REG_WR(bp, i, 0xc0cac01a);
5288 /* TODO: replace with something meaningful */
5289 }
5290 if (CHIP_IS_E1H(bp))
5291 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5292 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5293
5294 if (sizeof(union cdu_context) != 1024)
5295 /* we currently assume that a context is 1024 bytes */
5296 printk(KERN_ALERT PFX "please adjust the size of"
5297 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 5298
5299 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5300 val = (4 << 24) + (0 << 12) + 1024;
5301 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5302 if (CHIP_IS_E1(bp)) {
5303 /* !!! fix pxp client credit until excel update */
5304 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5305 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5306 }
a2fbb9ea 5307
5308 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5309 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
a2fbb9ea 5310
5311 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5312 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
a2fbb9ea 5313
5314 /* PXPCS COMMON comes here */
5315 /* Reset PCIE errors for debug */
5316 REG_WR(bp, 0x2814, 0xffffffff);
5317 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5318
5319 /* EMAC0 COMMON comes here */
5320 /* EMAC1 COMMON comes here */
5321 /* DBU COMMON comes here */
5322 /* DBG COMMON comes here */
5323
5324 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5325 if (CHIP_IS_E1H(bp)) {
5326 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5327 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5328 }
5329
5330 if (CHIP_REV_IS_SLOW(bp))
5331 msleep(200);
5332
5333 /* finish CFC init */
5334 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5335 if (val != 1) {
5336 BNX2X_ERR("CFC LL_INIT failed\n");
5337 return -EBUSY;
5338 }
5339 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5340 if (val != 1) {
5341 BNX2X_ERR("CFC AC_INIT failed\n");
5342 return -EBUSY;
5343 }
5344 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5345 if (val != 1) {
5346 BNX2X_ERR("CFC CAM_INIT failed\n");
5347 return -EBUSY;
5348 }
5349 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5350
5351 /* read NIG statistic
5352 to see if this is our first up since powerup */
5353 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5354 val = *bnx2x_sp(bp, wb_data[0]);
5355
5356 /* do internal memory self test */
5357 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5358 BNX2X_ERR("internal mem self test failed\n");
5359 return -EBUSY;
5360 }
5361
5362 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
7add905f 5363 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5364 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5365 /* Fan failure is indicated by SPIO 5 */
5366 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5367 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5368
5369 /* set to active low mode */
5370 val = REG_RD(bp, MISC_REG_SPIO_INT);
5371 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
f1410647 5372 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
34f80b04 5373 REG_WR(bp, MISC_REG_SPIO_INT, val);
f1410647 5374
5375 /* enable interrupt to signal the IGU */
5376 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5377 val |= (1 << MISC_REGISTERS_SPIO_5);
5378 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5379 break;
f1410647 5380
5381 default:
5382 break;
5383 }
f1410647 5384
5385 /* clear PXP2 attentions */
5386 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5387
34f80b04 5388 enable_blocks_attention(bp);
a2fbb9ea 5389
5390 if (!BP_NOMCP(bp)) {
5391 bnx2x_acquire_phy_lock(bp);
5392 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5393 bnx2x_release_phy_lock(bp);
5394 } else
5395 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
5396
5397 return 0;
5398}
a2fbb9ea 5399
5400static int bnx2x_init_port(struct bnx2x *bp)
5401{
5402 int port = BP_PORT(bp);
5403 u32 val;
a2fbb9ea 5404
5405 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5406
5407 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5408
5409 /* Port PXP comes here */
5410 /* Port PXP2 comes here */
5411#ifdef BCM_ISCSI
5412 /* Port0 1
5413 * Port1 385 */
5414 i++;
5415 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5416 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5417 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5418 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5419
5420 /* Port0 2
5421 * Port1 386 */
5422 i++;
5423 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5424 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5425 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5426 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5427
5428 /* Port0 3
5429 * Port1 387 */
5430 i++;
5431 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5432 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5433 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5434 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5435#endif
34f80b04 5436 /* Port CMs come here */
5437
5438 /* Port QM comes here */
5439#ifdef BCM_ISCSI
5440 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5441 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5442
5443 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5444 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5445#endif
5446 /* Port DQ comes here */
5447 /* Port BRB1 comes here */
ad8d3948 5448 /* Port PRS comes here */
5449 /* Port TSDM comes here */
5450 /* Port CSDM comes here */
5451 /* Port USDM comes here */
5452 /* Port XSDM comes here */
5453 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5454 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5455 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5456 port ? USEM_PORT1_END : USEM_PORT0_END);
5457 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5458 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5459 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5460 port ? XSEM_PORT1_END : XSEM_PORT0_END);
a2fbb9ea 5461 /* Port UPB comes here */
5462 /* Port XPB comes here */
5463
5464 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5465 port ? PBF_PORT1_END : PBF_PORT0_END);
5466
5467 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 5468 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5469
5470 /* update threshold */
34f80b04 5471 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 5472 /* update init credit */
34f80b04 5473 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
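/* The arithmetic above, spelled out: thresholds and credits are in
 * 16-byte units, so 9040/16 = 565 units covers a 9000-byte MTU plus
 * header room, and the initial credit is 565 + 553 - 22 = 1096 units
 * (the meaning of the 553/-22 adjustments is an assumption here -
 * extra buffering minus in-flight slack).
 */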
5474
5475 /* probe changes */
34f80b04 5476 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 5477 msleep(5);
34f80b04 5478 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5479
5480#ifdef BCM_ISCSI
5481 /* tell the searcher where the T2 table is */
5482 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5483
5484 wb_write[0] = U64_LO(bp->t2_mapping);
5485 wb_write[1] = U64_HI(bp->t2_mapping);
5486 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5487 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5488 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5489 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5490
5491 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5492 /* Port SRCH comes here */
5493#endif
5494 /* Port CDU comes here */
5495 /* Port CFC comes here */
5496
5497 if (CHIP_IS_E1(bp)) {
5498 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5499 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5500 }
5501 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5502 port ? HC_PORT1_END : HC_PORT0_END);
5503
5504 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
a2fbb9ea 5505 MISC_AEU_PORT0_START,
5506 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5507 /* init aeu_mask_attn_func_0/1:
5508 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5509 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5510 * bits 4-7 are used for "per vn group attention" */
5511 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5512 (IS_E1HMF(bp) ? 0xF7 : 0x7));
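/* Spelled out (illustrative): SF mode writes 0x07 - only group
 * attentions 0-2 enabled; E1HMF mode writes 0xF7 = 11110111b - bit 3
 * still masked, bits 4-7 enabled for the per-VN group attentions
 * described above.
 */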
5513
5514 /* Port PXPCS comes here */
5515 /* Port EMAC0 comes here */
5516 /* Port EMAC1 comes here */
5517 /* Port DBU comes here */
5518 /* Port DBG comes here */
5519 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5520 port ? NIG_PORT1_END : NIG_PORT0_END);
5521
5522 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5523
5524 if (CHIP_IS_E1H(bp)) {
5525 u32 wsum;
5526 struct cmng_struct_per_port m_cmng_port;
5527 int vn;
5528
5529 /* 0x2 disable e1hov, 0x1 enable */
5530 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5531 (IS_E1HMF(bp) ? 0x1 : 0x2));
5532
5533 /* Init RATE SHAPING and FAIRNESS contexts.
5534 Initialize as if there is 10G link. */
5535 wsum = bnx2x_calc_vn_wsum(bp);
5536 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5537 if (IS_E1HMF(bp))
5538 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5539 bnx2x_init_vn_minmax(bp, 2*vn + port,
5540 wsum, 10000, &m_cmng_port);
5541 }
5542
5543 /* Port MCP comes here */
5544 /* Port DMAE comes here */
5545
34f80b04 5546 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
7add905f 5547 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5548 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5549 /* add SPIO 5 to group 0 */
5550 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5551 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5552 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5553 break;
5554
5555 default:
5556 break;
5557 }
5558
c18487ee 5559 bnx2x__link_reset(bp);
a2fbb9ea 5560
5561 return 0;
5562}
5563
5564#define ILT_PER_FUNC (768/2)
5565#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5566/* the phys address is shifted right 12 bits and a 1=valid bit
5567 is added at the 53rd bit;
5568 then, since this is a wide register(TM),
5569 we split it into two 32 bit writes
5570 */
5571#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5572#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5573#define PXP_ONE_ILT(x) (((x) << 10) | x)
5574#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
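/* Worked example (hypothetical address, for illustration only): for
 * addr = 0x0000001234567000, ONCHIP_ADDR1(addr) = 0x01234567
 * (addr >> 12, low 32 bits) and ONCHIP_ADDR2(addr) = 0x00100000
 * (the valid bit, plus addr >> 44 which is 0 here).  Read back as
 * high:low this is 0x0010000001234567 - the page-shifted address with
 * the valid bit at bit 52 (the 53rd bit).  PXP_ONE_ILT(x) encodes the
 * one-line range [x, x] as (x << 10) | x.
 */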
5575
5576#define CNIC_ILT_LINES 0
5577
5578static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5579{
5580 int reg;
5581
5582 if (CHIP_IS_E1H(bp))
5583 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5584 else /* E1 */
5585 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5586
5587 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5588}
5589
5590static int bnx2x_init_func(struct bnx2x *bp)
5591{
5592 int port = BP_PORT(bp);
5593 int func = BP_FUNC(bp);
5594 int i;
5595
5596 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5597
5598 i = FUNC_ILT_BASE(func);
5599
5600 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5601 if (CHIP_IS_E1H(bp)) {
5602 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5603 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5604 } else /* E1 */
5605 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5606 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5607
5608
5609 if (CHIP_IS_E1H(bp)) {
5610 for (i = 0; i < 9; i++)
5611 bnx2x_init_block(bp,
5612 cm_start[func][i], cm_end[func][i]);
5613
5614 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5615 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5616 }
5617
5618 /* HC init per function */
5619 if (CHIP_IS_E1H(bp)) {
5620 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5621
5622 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5623 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5624 }
5625 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5626
5627 if (CHIP_IS_E1H(bp))
5628 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5629
c14423fe 5630 /* Reset PCIE errors for debug */
5631 REG_WR(bp, 0x2114, 0xffffffff);
5632 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 5633
5634 return 0;
5635}
5636
5637static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5638{
5639 int i, rc = 0;
a2fbb9ea 5640
5641 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5642 BP_FUNC(bp), load_code);
a2fbb9ea 5643
5644 bp->dmae_ready = 0;
5645 mutex_init(&bp->dmae_mutex);
5646 bnx2x_gunzip_init(bp);
a2fbb9ea 5647
5648 switch (load_code) {
5649 case FW_MSG_CODE_DRV_LOAD_COMMON:
5650 rc = bnx2x_init_common(bp);
5651 if (rc)
5652 goto init_hw_err;
5653 /* no break */
5654
5655 case FW_MSG_CODE_DRV_LOAD_PORT:
5656 bp->dmae_ready = 1;
5657 rc = bnx2x_init_port(bp);
5658 if (rc)
5659 goto init_hw_err;
5660 /* no break */
5661
5662 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5663 bp->dmae_ready = 1;
5664 rc = bnx2x_init_func(bp);
5665 if (rc)
5666 goto init_hw_err;
5667 break;
5668
5669 default:
5670 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5671 break;
5672 }
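/* The missing breaks above are intentional ("no break"): the cases
 * cascade, so the MCP response selects how much init runs:
 *   FW_MSG_CODE_DRV_LOAD_COMMON   -> common + port + function init
 *   FW_MSG_CODE_DRV_LOAD_PORT     ->          port + function init
 *   FW_MSG_CODE_DRV_LOAD_FUNCTION ->                 function init
 */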
5673
5674 if (!BP_NOMCP(bp)) {
5675 int func = BP_FUNC(bp);
5676
5677 bp->fw_drv_pulse_wr_seq =
34f80b04 5678 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 5679 DRV_PULSE_SEQ_MASK);
5680 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5681 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
5682 bp->fw_drv_pulse_wr_seq, bp->func_stx);
5683 } else
5684 bp->func_stx = 0;
a2fbb9ea 5685
5686 /* this needs to be done before gunzip end */
5687 bnx2x_zero_def_sb(bp);
5688 for_each_queue(bp, i)
5689 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5690
5691init_hw_err:
5692 bnx2x_gunzip_end(bp);
5693
5694 return rc;
5695}
5696
c14423fe 5697/* send the MCP a request, block until there is a reply */
5698static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5699{
34f80b04 5700 int func = BP_FUNC(bp);
5701 u32 seq = ++bp->fw_seq;
5702 u32 rc = 0;
5703 u32 cnt = 1;
5704 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
a2fbb9ea 5705
34f80b04 5706 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
f1410647 5707 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea 5708
5709 do {
5710 /* let the FW do its magic ... */
5711 msleep(delay);
a2fbb9ea 5712
19680c48 5713 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
a2fbb9ea 5714
5715 /* Give the FW up to 2 seconds (200*10ms) */
5716 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5717
5718 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5719 cnt*delay, rc, seq);
5720
5721 /* is this a reply to our command? */
5722 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5723 rc &= FW_MSG_CODE_MASK;
f1410647 5724
5725 } else {
5726 /* FW BUG! */
5727 BNX2X_ERR("FW failed to respond!\n");
5728 bnx2x_fw_dump(bp);
5729 rc = 0;
5730 }
f1410647 5731
5732 return rc;
5733}
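/* Usage sketch (hypothetical numbers, for illustration): if bp->fw_seq
 * was 4, the call writes (command | 5) to drv_mb_header and then polls
 * fw_mb_header every 10ms (100ms on emulation/FPGA) until the FW
 * echoes sequence number 5; it returns the response masked with
 * FW_MSG_CODE_MASK, or 0 if the FW never answers within ~200 polls.
 */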
5734
5735static void bnx2x_free_mem(struct bnx2x *bp)
5736{
5737
5738#define BNX2X_PCI_FREE(x, y, size) \
5739 do { \
5740 if (x) { \
5741 pci_free_consistent(bp->pdev, size, x, y); \
5742 x = NULL; \
5743 y = 0; \
5744 } \
5745 } while (0)
5746
5747#define BNX2X_FREE(x) \
5748 do { \
5749 if (x) { \
5750 vfree(x); \
5751 x = NULL; \
5752 } \
5753 } while (0)
5754
5755 int i;
5756
5757 /* fastpath */
5758 for_each_queue(bp, i) {
5759
5760 /* Status blocks */
5761 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5762 bnx2x_fp(bp, i, status_blk_mapping),
5763 sizeof(struct host_status_block) +
5764 sizeof(struct eth_tx_db_data));
5765
5766 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5767 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5768 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5769 bnx2x_fp(bp, i, tx_desc_mapping),
5770 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5771
5772 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5773 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5774 bnx2x_fp(bp, i, rx_desc_mapping),
5775 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5776
5777 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5778 bnx2x_fp(bp, i, rx_comp_mapping),
5779 sizeof(struct eth_fast_path_rx_cqe) *
5780 NUM_RCQ_BD);
a2fbb9ea 5781
7a9b2557 5782 /* SGE ring */
32626230 5783 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5784 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5785 bnx2x_fp(bp, i, rx_sge_mapping),
5786 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5787 }
5788 /* end of fastpath */
5789
5790 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 5791 sizeof(struct host_def_status_block));
5792
5793 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 5794 sizeof(struct bnx2x_slowpath));
5795
5796#ifdef BCM_ISCSI
5797 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5798 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5799 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5800 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5801#endif
7a9b2557 5802 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5803
5804#undef BNX2X_PCI_FREE
5805#undef BNX2X_FREE
5806}
5807
5808static int bnx2x_alloc_mem(struct bnx2x *bp)
5809{
5810
5811#define BNX2X_PCI_ALLOC(x, y, size) \
5812 do { \
5813 x = pci_alloc_consistent(bp->pdev, size, y); \
5814 if (x == NULL) \
5815 goto alloc_mem_err; \
5816 memset(x, 0, size); \
5817 } while (0)
5818
5819#define BNX2X_ALLOC(x, size) \
5820 do { \
5821 x = vmalloc(size); \
5822 if (x == NULL) \
5823 goto alloc_mem_err; \
5824 memset(x, 0, size); \
5825 } while (0)
5826
5827 int i;
5828
5829 /* fastpath */
5830 for_each_queue(bp, i) {
5831 bnx2x_fp(bp, i, bp) = bp;
5832
5833 /* Status blocks */
5834 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5835 &bnx2x_fp(bp, i, status_blk_mapping),
5836 sizeof(struct host_status_block) +
5837 sizeof(struct eth_tx_db_data));
5838
5839 bnx2x_fp(bp, i, hw_tx_prods) =
5840 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5841
5842 bnx2x_fp(bp, i, tx_prods_mapping) =
5843 bnx2x_fp(bp, i, status_blk_mapping) +
5844 sizeof(struct host_status_block);
5845
5846 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5847 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5848 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5849 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5850 &bnx2x_fp(bp, i, tx_desc_mapping),
5851 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5852
5853 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5854 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5855 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5856 &bnx2x_fp(bp, i, rx_desc_mapping),
5857 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5858
5859 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5860 &bnx2x_fp(bp, i, rx_comp_mapping),
5861 sizeof(struct eth_fast_path_rx_cqe) *
5862 NUM_RCQ_BD);
5863
5864 /* SGE ring */
5865 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5866 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5867 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5868 &bnx2x_fp(bp, i, rx_sge_mapping),
5869 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5870 }
5871 /* end of fastpath */
5872
5873 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5874 sizeof(struct host_def_status_block));
5875
5876 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5877 sizeof(struct bnx2x_slowpath));
5878
5879#ifdef BCM_ISCSI
5880 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5881
5882 /* Initialize T1 */
5883 for (i = 0; i < 64*1024; i += 64) {
5884 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5885 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5886 }
5887
5888 /* allocate the searcher T2 table;
5889 we allocate 1/4 of the allocation number for T2
5890 (which is not entered into the ILT) */
5891 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5892
5893 /* Initialize T2 */
5894 for (i = 0; i < 16*1024; i += 64)
5895 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5896
c14423fe 5897 /* now fixup the last line in the block to point to the next block */
5898 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
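/* Layout note (derived from the loops above): T2 forms a singly
 * linked free list - the last 8 bytes of each 64-byte entry hold the
 * physical address of the next entry, and the final entry points back
 * to t2_mapping, closing the list into a ring for the searcher.
 */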
5899
5900 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5901 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5902
5903 /* QM queues (128*MAX_CONN) */
5904 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5905#endif
5906
5907 /* Slow path ring */
5908 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5909
5910 return 0;
5911
5912alloc_mem_err:
5913 bnx2x_free_mem(bp);
5914 return -ENOMEM;
5915
5916#undef BNX2X_PCI_ALLOC
5917#undef BNX2X_ALLOC
5918}
5919
5920static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5921{
5922 int i;
5923
5924 for_each_queue(bp, i) {
5925 struct bnx2x_fastpath *fp = &bp->fp[i];
5926
5927 u16 bd_cons = fp->tx_bd_cons;
5928 u16 sw_prod = fp->tx_pkt_prod;
5929 u16 sw_cons = fp->tx_pkt_cons;
5930
5931 while (sw_cons != sw_prod) {
5932 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5933 sw_cons++;
5934 }
5935 }
5936}
5937
5938static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5939{
5940 int i, j;
5941
5942 for_each_queue(bp, j) {
5943 struct bnx2x_fastpath *fp = &bp->fp[j];
5944
5945 for (i = 0; i < NUM_RX_BD; i++) {
5946 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5947 struct sk_buff *skb = rx_buf->skb;
5948
5949 if (skb == NULL)
5950 continue;
5951
5952 pci_unmap_single(bp->pdev,
5953 pci_unmap_addr(rx_buf, mapping),
437cf2f1 5954 bp->rx_buf_size,
5955 PCI_DMA_FROMDEVICE);
5956
5957 rx_buf->skb = NULL;
5958 dev_kfree_skb(skb);
5959 }
7a9b2557 5960 if (!fp->disable_tpa)
5961 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
5962 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 5963 ETH_MAX_AGGREGATION_QUEUES_E1H);
5964 }
5965}
5966
5967static void bnx2x_free_skbs(struct bnx2x *bp)
5968{
5969 bnx2x_free_tx_skbs(bp);
5970 bnx2x_free_rx_skbs(bp);
5971}
5972
5973static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5974{
34f80b04 5975 int i, offset = 1;
5976
5977 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 5978 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
5979 bp->msix_table[0].vector);
5980
5981 for_each_queue(bp, i) {
c14423fe 5982 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 5983 "state %x\n", i, bp->msix_table[i + offset].vector,
5984 bnx2x_fp(bp, i, state));
5985
5986 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
5987 BNX2X_ERR("IRQ of fp #%d being freed while "
5988 "state != closed\n", i);
a2fbb9ea 5989
34f80b04 5990 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 5991 }
5992}
5993
5994static void bnx2x_free_irq(struct bnx2x *bp)
5995{
a2fbb9ea 5996 if (bp->flags & USING_MSIX_FLAG) {
5997 bnx2x_free_msix_irqs(bp);
5998 pci_disable_msix(bp->pdev);
5999 bp->flags &= ~USING_MSIX_FLAG;
6000
6001 } else
6002 free_irq(bp->pdev->irq, bp->dev);
6003}
6004
6005static int bnx2x_enable_msix(struct bnx2x *bp)
6006{
34f80b04 6007 int i, rc, offset;
6008
6009 bp->msix_table[0].entry = 0;
6010 offset = 1;
6011 DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
a2fbb9ea 6012
6013 for_each_queue(bp, i) {
6014 int igu_vec = offset + i + BP_L_ID(bp);
a2fbb9ea 6015
6016 bp->msix_table[i + offset].entry = igu_vec;
6017 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6018 "(fastpath #%u)\n", i + offset, igu_vec, i);
6019 }
6020
6021 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6022 bp->num_queues + offset);
6023 if (rc) {
6024 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6025 return -1;
6026 }
6027 bp->flags |= USING_MSIX_FLAG;
6028
6029 return 0;
6030}
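/* Vector layout sketch (assuming BP_L_ID(bp) == 0 for illustration):
 * msix_table[0] is the slowpath vector; with four queues, entries 1-4
 * are assigned IGU vectors 1-4, one per fastpath ring.  On failure
 * the caller falls back to a single shared INT#A vector.
 */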
6031
6032static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6033{
34f80b04 6034 int i, rc, offset = 1;
a2fbb9ea 6035
6036 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6037 bp->dev->name, bp->dev);
6038 if (rc) {
6039 BNX2X_ERR("request sp irq failed\n");
6040 return -EBUSY;
6041 }
6042
6043 for_each_queue(bp, i) {
34f80b04 6044 rc = request_irq(bp->msix_table[i + offset].vector,
6045 bnx2x_msix_fp_int, 0,
6046 bp->dev->name, &bp->fp[i]);
a2fbb9ea 6047 if (rc) {
6048 BNX2X_ERR("request fp #%d irq failed rc -%d\n",
6049 i + offset, -rc);
6050 bnx2x_free_msix_irqs(bp);
6051 return -EBUSY;
6052 }
6053
6054 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6055 }
6056
6057 return 0;
6058}
6059
6060static int bnx2x_req_irq(struct bnx2x *bp)
6061{
34f80b04 6062 int rc;
a2fbb9ea 6063
6064 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6065 bp->dev->name, bp->dev);
6066 if (!rc)
6067 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6068
6069 return rc;
6070}
6071
6072static void bnx2x_napi_enable(struct bnx2x *bp)
6073{
6074 int i;
6075
6076 for_each_queue(bp, i)
6077 napi_enable(&bnx2x_fp(bp, i, napi));
6078}
6079
6080static void bnx2x_napi_disable(struct bnx2x *bp)
6081{
6082 int i;
6083
6084 for_each_queue(bp, i)
6085 napi_disable(&bnx2x_fp(bp, i, napi));
6086}
6087
6088static void bnx2x_netif_start(struct bnx2x *bp)
6089{
6090 if (atomic_dec_and_test(&bp->intr_sem)) {
6091 if (netif_running(bp->dev)) {
6092 if (bp->state == BNX2X_STATE_OPEN)
6093 netif_wake_queue(bp->dev);
6094 bnx2x_napi_enable(bp);
6095 bnx2x_int_enable(bp);
6096 }
6097 }
6098}
6099
f8ef6e44 6100static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 6101{
f8ef6e44 6102 bnx2x_int_disable_sync(bp, disable_hw);
6103 if (netif_running(bp->dev)) {
6104 bnx2x_napi_disable(bp);
6105 netif_tx_disable(bp->dev);
6106 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6107 }
6108}
6109
6110/*
6111 * Init service functions
6112 */
6113
3101c2bc 6114static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6115{
6116 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 6117 int port = BP_PORT(bp);
6118
6119 /* CAM allocation
6120 * unicasts 0-31:port0 32-63:port1
6121 * multicast 64-127:port0 128-191:port1
6122 */
6123 config->hdr.length_6b = 2;
6124 config->hdr.offset = port ? 31 : 0;
6125 config->hdr.client_id = BP_CL_ID(bp);
6126 config->hdr.reserved1 = 0;
6127
6128 /* primary MAC */
6129 config->config_table[0].cam_entry.msb_mac_addr =
6130 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6131 config->config_table[0].cam_entry.middle_mac_addr =
6132 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6133 config->config_table[0].cam_entry.lsb_mac_addr =
6134 swab16(*(u16 *)&bp->dev->dev_addr[4]);
34f80b04 6135 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6136 if (set)
6137 config->config_table[0].target_table_entry.flags = 0;
6138 else
6139 CAM_INVALIDATE(config->config_table[0]);
6140 config->config_table[0].target_table_entry.client_id = 0;
6141 config->config_table[0].target_table_entry.vlan_id = 0;
6142
6143 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6144 (set ? "setting" : "clearing"),
6145 config->config_table[0].cam_entry.msb_mac_addr,
6146 config->config_table[0].cam_entry.middle_mac_addr,
6147 config->config_table[0].cam_entry.lsb_mac_addr);
6148
6149 /* broadcast */
6150 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6151 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6152 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
34f80b04 6153 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6154 if (set)
6155 config->config_table[1].target_table_entry.flags =
a2fbb9ea 6156 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6157 else
6158 CAM_INVALIDATE(config->config_table[1]);
6159 config->config_table[1].target_table_entry.client_id = 0;
6160 config->config_table[1].target_table_entry.vlan_id = 0;
6161
6162 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6163 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6164 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6165}
6166
3101c2bc 6167static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6168{
6169 struct mac_configuration_cmd_e1h *config =
6170 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6171
3101c2bc 6172 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6173 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6174 return;
6175 }
6176
6177 /* CAM allocation for E1H
6178 * unicasts: by func number
6179 * multicast: 20+FUNC*20, 20 each
6180 */
6181 config->hdr.length_6b = 1;
6182 config->hdr.offset = BP_FUNC(bp);
6183 config->hdr.client_id = BP_CL_ID(bp);
6184 config->hdr.reserved1 = 0;
6185
6186 /* primary MAC */
6187 config->config_table[0].msb_mac_addr =
6188 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6189 config->config_table[0].middle_mac_addr =
6190 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6191 config->config_table[0].lsb_mac_addr =
6192 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6193 config->config_table[0].client_id = BP_L_ID(bp);
6194 config->config_table[0].vlan_id = 0;
6195 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6196 if (set)
6197 config->config_table[0].flags = BP_PORT(bp);
6198 else
6199 config->config_table[0].flags =
6200 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 6201
6202 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6203 (set ? "setting" : "clearing"),
6204 config->config_table[0].msb_mac_addr,
6205 config->config_table[0].middle_mac_addr,
6206 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6207
6208 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6209 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6210 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6211}
6212
6213static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6214 int *state_p, int poll)
6215{
6216 /* can take a while if any port is running */
34f80b04 6217 int cnt = 500;
a2fbb9ea 6218
6219 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6220 poll ? "polling" : "waiting", state, idx);
6221
6222 might_sleep();
34f80b04 6223 while (cnt--) {
6224 if (poll) {
6225 bnx2x_rx_int(bp->fp, 10);
6226 /* if index is different from 0
6227 * the reply for some commands will
3101c2bc 6228 * be on the non default queue
6229 */
6230 if (idx)
6231 bnx2x_rx_int(&bp->fp[idx], 10);
6232 }
a2fbb9ea 6233
3101c2bc 6234 mb(); /* state is changed by bnx2x_sp_event() */
49d66772 6235 if (*state_p == state)
6236 return 0;
6237
a2fbb9ea 6238 msleep(1);
6239 }
6240
a2fbb9ea 6241 /* timeout! */
6242 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6243 poll ? "polling" : "waiting", state, idx);
6244#ifdef BNX2X_STOP_ON_ERROR
6245 bnx2x_panic();
6246#endif
a2fbb9ea 6247
49d66772 6248 return -EBUSY;
6249}
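/* Typical call pattern (see bnx2x_setup_leading() below): post a
 * ramrod, then
 *     rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &bp->state, 0);
 * sleeps in 1ms steps, up to ~500ms, until the completion path flips
 * the state.  With poll set it drives bnx2x_rx_int() by hand instead,
 * presumably for contexts where interrupts are not serviced.
 */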
6250
6251static int bnx2x_setup_leading(struct bnx2x *bp)
6252{
34f80b04 6253 int rc;
a2fbb9ea 6254
c14423fe 6255 /* reset IGU state */
34f80b04 6256 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6257
6258 /* SETUP ramrod */
6259 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6260
6261 /* Wait for completion */
6262 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 6263
34f80b04 6264 return rc;
6265}
6266
6267static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6268{
a2fbb9ea 6269 /* reset IGU state */
34f80b04 6270 bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 6271
228241eb 6272 /* SETUP ramrod */
6273 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6274 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6275
6276 /* Wait for completion */
6277 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
228241eb 6278 &(bp->fp[index].state), 0);
6279}
6280
6281static int bnx2x_poll(struct napi_struct *napi, int budget);
6282static void bnx2x_set_rx_mode(struct net_device *dev);
6283
6284/* must be called with rtnl_lock */
6285static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
a2fbb9ea 6286{
228241eb 6287 u32 load_code;
34f80b04 6288 int i, rc;
6289#ifdef BNX2X_STOP_ON_ERROR
6290 if (unlikely(bp->panic))
6291 return -EPERM;
6292#endif
6293
6294 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6295
6296 /* Send LOAD_REQUEST command to MCP
6297 Returns the type of LOAD command:
6298 if it is the first port to be initialized
6299 common blocks should be initialized, otherwise - not
a2fbb9ea 6300 */
34f80b04 6301 if (!BP_NOMCP(bp)) {
6302 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6303 if (!load_code) {
da5a662a 6304 BNX2X_ERR("MCP response failure, aborting\n");
6305 return -EBUSY;
6306 }
34f80b04 6307 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
a2fbb9ea 6308 return -EBUSY; /* other port in diagnostic mode */
34f80b04 6309
a2fbb9ea 6310 } else {
da5a662a
VZ
6311 int port = BP_PORT(bp);
6312
6313 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6314 load_count[0], load_count[1], load_count[2]);
6315 load_count[0]++;
da5a662a 6316 load_count[1 + port]++;
6317 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6318 load_count[0], load_count[1], load_count[2]);
6319 if (load_count[0] == 1)
6320 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
da5a662a 6321 else if (load_count[1 + port] == 1)
6322 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6323 else
6324 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6325 }
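/* Example of the counting scheme (illustrative): load_count[0] counts
 * all functions, load_count[1]/[2] those on port 0/1.  The first load
 * overall therefore picks LOAD_COMMON, the first load on a given port
 * picks LOAD_PORT, and any later load on that port picks
 * LOAD_FUNCTION - mimicking the answers the MCP would give.
 */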
6326
6327 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6328 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6329 bp->port.pmf = 1;
6330 else
6331 bp->port.pmf = 0;
6332 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6333
6334 /* if we can't use MSI-X we only need one fp,
6335 * so try to enable MSI-X with the requested number of fp's
6336 * and fallback to inta with one fp
6337 */
6338 if (use_inta) {
6339 bp->num_queues = 1;
6340
6341 } else {
6342 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6343 /* user requested number */
6344 bp->num_queues = use_multi;
6345
6346 else if (use_multi)
6347 bp->num_queues = min_t(u32, num_online_cpus(),
6348 BP_MAX_QUEUES(bp));
6349 else
a2fbb9ea 6350 bp->num_queues = 1;
6351
6352 if (bnx2x_enable_msix(bp)) {
6353 /* failed to enable MSI-X */
6354 bp->num_queues = 1;
6355 if (use_multi)
6356 BNX2X_ERR("Multi requested but failed"
6357 " to enable MSI-X\n");
6358 }
6359 }
6360 DP(NETIF_MSG_IFUP,
6361 "set number of queues to %d\n", bp->num_queues);
c14423fe 6362
6363 if (bnx2x_alloc_mem(bp))
6364 return -ENOMEM;
6365
6366 for_each_queue(bp, i)
6367 bnx2x_fp(bp, i, disable_tpa) =
6368 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6369
6370 if (bp->flags & USING_MSIX_FLAG) {
6371 rc = bnx2x_req_msix_irqs(bp);
6372 if (rc) {
6373 pci_disable_msix(bp->pdev);
6374 goto load_error;
6375 }
6376 } else {
6377 bnx2x_ack_int(bp);
6378 rc = bnx2x_req_irq(bp);
6379 if (rc) {
6380 BNX2X_ERR("IRQ request failed, aborting\n");
6381 goto load_error;
6382 }
6383 }
6384
6385 for_each_queue(bp, i)
6386 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6387 bnx2x_poll, 128);
6388
a2fbb9ea 6389 /* Initialize HW */
6390 rc = bnx2x_init_hw(bp, load_code);
6391 if (rc) {
a2fbb9ea 6392 BNX2X_ERR("HW init failed, aborting\n");
d1014634 6393 goto load_int_disable;
6394 }
6395
a2fbb9ea 6396 /* Setup NIC internals and enable interrupts */
471de716 6397 bnx2x_nic_init(bp, load_code);
6398
6399 /* Send LOAD_DONE command to MCP */
34f80b04 6400 if (!BP_NOMCP(bp)) {
6401 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6402 if (!load_code) {
da5a662a 6403 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 6404 rc = -EBUSY;
d1014634 6405 goto load_rings_free;
6406 }
6407 }
6408
6409 bnx2x_stats_init(bp);
6410
6411 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6412
6413 /* Enable Rx interrupt handling before sending the ramrod
6414 as it's completed on Rx FP queue */
65abd74d 6415 bnx2x_napi_enable(bp);
a2fbb9ea 6416
6417 /* Enable interrupt handling */
6418 atomic_set(&bp->intr_sem, 0);
6419
6420 rc = bnx2x_setup_leading(bp);
6421 if (rc) {
da5a662a 6422 BNX2X_ERR("Setup leading failed!\n");
d1014634 6423 goto load_netif_stop;
34f80b04 6424 }
a2fbb9ea 6425
6426 if (CHIP_IS_E1H(bp))
6427 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6428 BNX2X_ERR("!!! mf_cfg function disabled\n");
6429 bp->state = BNX2X_STATE_DISABLED;
6430 }
a2fbb9ea 6431
6432 if (bp->state == BNX2X_STATE_OPEN)
6433 for_each_nondefault_queue(bp, i) {
6434 rc = bnx2x_setup_multi(bp, i);
6435 if (rc)
d1014634 6436 goto load_netif_stop;
34f80b04 6437 }
a2fbb9ea 6438
34f80b04 6439 if (CHIP_IS_E1(bp))
3101c2bc 6440 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 6441 else
3101c2bc 6442 bnx2x_set_mac_addr_e1h(bp, 1);
6443
6444 if (bp->port.pmf)
6445 bnx2x_initial_phy_init(bp);
6446
6447 /* Start fast path */
6448 switch (load_mode) {
6449 case LOAD_NORMAL:
6450 /* Tx queue should only be re-enabled */
6451 netif_wake_queue(bp->dev);
6452 bnx2x_set_rx_mode(bp->dev);
6453 break;
6454
6455 case LOAD_OPEN:
a2fbb9ea 6456 netif_start_queue(bp->dev);
34f80b04 6457 bnx2x_set_rx_mode(bp->dev);
6458 if (bp->flags & USING_MSIX_FLAG)
6459 printk(KERN_INFO PFX "%s: using MSI-X\n",
6460 bp->dev->name);
34f80b04 6461 break;
a2fbb9ea 6462
34f80b04 6463 case LOAD_DIAG:
a2fbb9ea 6464 bnx2x_set_rx_mode(bp->dev);
6465 bp->state = BNX2X_STATE_DIAG;
6466 break;
6467
6468 default:
6469 break;
6470 }
6471
6472 if (!bp->port.pmf)
6473 bnx2x__link_status_update(bp);
6474
6475 /* start the timer */
6476 mod_timer(&bp->timer, jiffies + bp->current_interval);
6477
34f80b04 6478
6479 return 0;
6480
d1014634 6481load_netif_stop:
65abd74d 6482 bnx2x_napi_disable(bp);
d1014634 6483load_rings_free:
6484 /* Free SKBs, SGEs, TPA pool and driver internals */
6485 bnx2x_free_skbs(bp);
6486 for_each_queue(bp, i)
3196a88a 6487 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d1014634 6488load_int_disable:
f8ef6e44 6489 bnx2x_int_disable_sync(bp, 1);
6490 /* Release IRQs */
6491 bnx2x_free_irq(bp);
228241eb 6492load_error:
a2fbb9ea 6493 bnx2x_free_mem(bp);
9a035440 6494 bp->port.pmf = 0;
6495
6496 /* TBD we really need to reset the chip
6497 if we want to recover from this */
34f80b04 6498 return rc;
6499}
6500
6501static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6502{
6503 int rc;
6504
c14423fe 6505 /* halt the connection */
a2fbb9ea 6506 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
231fd58a 6507 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);
a2fbb9ea 6508
34f80b04 6509 /* Wait for completion */
a2fbb9ea 6510 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
34f80b04 6511 &(bp->fp[index].state), 1);
c14423fe 6512 if (rc) /* timeout */
6513 return rc;
6514
6515 /* delete cfc entry */
6516 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6517
6518 /* Wait for completion */
6519 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6520 &(bp->fp[index].state), 1);
6521 return rc;
6522}
6523
da5a662a 6524static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 6525{
49d66772 6526 u16 dsb_sp_prod_idx;
c14423fe 6527 /* if the other port is handling traffic,
a2fbb9ea 6528 this can take a lot of time */
6529 int cnt = 500;
6530 int rc;
6531
6532 might_sleep();
6533
6534 /* Send HALT ramrod */
6535 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
34f80b04 6536 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
a2fbb9ea 6537
6538 /* Wait for completion */
6539 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6540 &(bp->fp[0].state), 1);
6541 if (rc) /* timeout */
da5a662a 6542 return rc;
a2fbb9ea 6543
49d66772 6544 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 6545
228241eb 6546 /* Send PORT_DELETE ramrod */
6547 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6548
49d66772 6549 /* Wait for completion to arrive on default status block
6550 we are going to reset the chip anyway
6551 so there is not much to do if this times out
6552 */
34f80b04 6553 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6554 if (!cnt) {
6555 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6556 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6557 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6558#ifdef BNX2X_STOP_ON_ERROR
6559 bnx2x_panic();
6560#else
6561 rc = -EBUSY;
6562#endif
6563 break;
6564 }
6565 cnt--;
da5a662a 6566 msleep(1);
6567 }
6568 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6569 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6570
6571 return rc;
6572}
6573
6574static void bnx2x_reset_func(struct bnx2x *bp)
6575{
6576 int port = BP_PORT(bp);
6577 int func = BP_FUNC(bp);
6578 int base, i;
6579
6580 /* Configure IGU */
6581 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6582 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6583
6584 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6585
6586 /* Clear ILT */
6587 base = FUNC_ILT_BASE(func);
6588 for (i = base; i < base + ILT_PER_FUNC; i++)
6589 bnx2x_ilt_wr(bp, i, 0);
6590}
6591
6592static void bnx2x_reset_port(struct bnx2x *bp)
6593{
6594 int port = BP_PORT(bp);
6595 u32 val;
6596
6597 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6598
6599 /* Do not rcv packets to BRB */
6600 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6601 /* Do not direct rcv packets that are not for MCP to the BRB */
6602 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6603 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6604
6605 /* Configure AEU */
6606 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6607
6608 msleep(100);
6609 /* Check for BRB port occupancy */
6610 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6611 if (val)
6612 DP(NETIF_MSG_IFDOWN,
33471629 6613 "BRB1 is not empty %d blocks are occupied\n", val);
6614
6615 /* TODO: Close Doorbell port? */
6616}
6617
6618static void bnx2x_reset_common(struct bnx2x *bp)
6619{
6620 /* reset_common */
6621 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6622 0xd3ffff7f);
6623 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6624}
6625
6626static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6627{
6628 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6629 BP_FUNC(bp), reset_code);
6630
6631 switch (reset_code) {
6632 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6633 bnx2x_reset_port(bp);
6634 bnx2x_reset_func(bp);
6635 bnx2x_reset_common(bp);
6636 break;
6637
6638 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6639 bnx2x_reset_port(bp);
6640 bnx2x_reset_func(bp);
6641 break;
6642
6643 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6644 bnx2x_reset_func(bp);
6645 break;
49d66772 6646
6647 default:
6648 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6649 break;
6650 }
6651}
6652
33471629 6653/* must be called with rtnl_lock */
34f80b04 6654static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea 6655{
da5a662a 6656 int port = BP_PORT(bp);
a2fbb9ea 6657 u32 reset_code = 0;
da5a662a 6658 int i, cnt, rc;
6659
6660 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6661
6662 bp->rx_mode = BNX2X_RX_MODE_NONE;
6663 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 6664
f8ef6e44 6665 bnx2x_netif_stop(bp, 1);
6666 if (!netif_running(bp->dev))
6667 bnx2x_napi_disable(bp);
6668 del_timer_sync(&bp->timer);
6669 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6670 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 6671 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 6672
da5a662a 6673 /* Wait until tx fast path tasks complete */
6674 for_each_queue(bp, i) {
6675 struct bnx2x_fastpath *fp = &bp->fp[i];
6676
6677 cnt = 1000;
6678 smp_rmb();
6679 while (BNX2X_HAS_TX_WORK(fp)) {
6680
65abd74d 6681 bnx2x_tx_int(fp, 1000);
6682 if (!cnt) {
6683 BNX2X_ERR("timeout waiting for queue[%d]\n",
6684 i);
6685#ifdef BNX2X_STOP_ON_ERROR
6686 bnx2x_panic();
6687 return -EBUSY;
6688#else
6689 break;
6690#endif
6691 }
6692 cnt--;
da5a662a 6693 msleep(1);
6694 smp_rmb();
6695 }
228241eb 6696 }
6697 /* Give HW time to discard old tx messages */
6698 msleep(1);
a2fbb9ea 6699
6700 /* Release IRQs */
6701 bnx2x_free_irq(bp);
6702
6703 if (CHIP_IS_E1(bp)) {
6704 struct mac_configuration_cmd *config =
6705 bnx2x_sp(bp, mcast_config);
6706
6707 bnx2x_set_mac_addr_e1(bp, 0);
6708
6709 for (i = 0; i < config->hdr.length_6b; i++)
6710 CAM_INVALIDATE(config->config_table[i]);
6711
6712 config->hdr.length_6b = i;
6713 if (CHIP_REV_IS_SLOW(bp))
6714 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6715 else
6716 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6717 config->hdr.client_id = BP_CL_ID(bp);
6718 config->hdr.reserved1 = 0;
6719
6720 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6721 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6722 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6723
6724 } else { /* E1H */
6725 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6726
6727 bnx2x_set_mac_addr_e1h(bp, 0);
6728
6729 for (i = 0; i < MC_HASH_SIZE; i++)
6730 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6731 }
6732
6733 if (unload_mode == UNLOAD_NORMAL)
6734 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6735
6736 else if (bp->flags & NO_WOL_FLAG) {
6737 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6738 if (CHIP_IS_E1H(bp))
6739 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6740
6741 } else if (bp->wol) {
6742 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6743 u8 *mac_addr = bp->dev->dev_addr;
6744 u32 val;
6745 /* The mac address is written to entries 1-4 to
6746 preserve entry 0 which is used by the PMF */
6747 u8 entry = (BP_E1HVN(bp) + 1)*8;
6748
6749 val = (mac_addr[0] << 8) | mac_addr[1];
6750 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6751
6752 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6753 (mac_addr[4] << 8) | mac_addr[5];
6754 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6755
6756 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6757
6758 } else
6759 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 6760
6761 /* Close multi and leading connections
6762 Completions for ramrods are collected in a synchronous way */
6763 for_each_nondefault_queue(bp, i)
6764 if (bnx2x_stop_multi(bp, i))
228241eb 6765 goto unload_error;
a2fbb9ea 6766
da5a662a
VZ
6767 rc = bnx2x_stop_leading(bp);
6768 if (rc) {
34f80b04 6769 BNX2X_ERR("Stop leading failed!\n");
da5a662a 6770#ifdef BNX2X_STOP_ON_ERROR
34f80b04 6771 return -EBUSY;
da5a662a
VZ
6772#else
6773 goto unload_error;
34f80b04 6774#endif
228241eb
ET
6775 }
6776
6777unload_error:
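	/* With an MCP the firmware decides whether this unload is the last
	 * one on the port/chip; without one, the driver-side load_count[]
	 * bookkeeping makes the same COMMON/PORT/FUNCTION decision.
	 */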
34f80b04 6778 if (!BP_NOMCP(bp))
228241eb 6779 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
6780 else {
6781 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6782 load_count[0], load_count[1], load_count[2]);
6783 load_count[0]--;
da5a662a 6784 load_count[1 + port]--;
34f80b04
EG
6785 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6786 load_count[0], load_count[1], load_count[2]);
6787 if (load_count[0] == 0)
6788 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 6789 else if (load_count[1 + port] == 0)
34f80b04
EG
6790 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6791 else
6792 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6793 }
a2fbb9ea 6794
34f80b04
EG
6795 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6796 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6797 bnx2x__link_reset(bp);
a2fbb9ea
ET
6798
6799 /* Reset the chip */
228241eb 6800 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
6801
6802 /* Report UNLOAD_DONE to MCP */
34f80b04 6803 if (!BP_NOMCP(bp))
a2fbb9ea 6804 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9a035440 6805 bp->port.pmf = 0;
a2fbb9ea 6806
7a9b2557 6807 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 6808 bnx2x_free_skbs(bp);
7a9b2557 6809 for_each_queue(bp, i)
3196a88a 6810 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
a2fbb9ea
ET
6811 bnx2x_free_mem(bp);
6812
6813 bp->state = BNX2X_STATE_CLOSED;
228241eb 6814
a2fbb9ea
ET
6815 netif_carrier_off(bp->dev);
6816
6817 return 0;
6818}
6819
34f80b04
EG
6820static void bnx2x_reset_task(struct work_struct *work)
6821{
6822 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6823
6824#ifdef BNX2X_STOP_ON_ERROR
 6825	BNX2X_ERR("reset task called but STOP_ON_ERROR is defined,"
 6826		  " so the reset is skipped to allow a debug dump;\n"
 6827		  KERN_ERR " you will need to reboot when done\n");
6828 return;
6829#endif
6830
6831 rtnl_lock();
6832
6833 if (!netif_running(bp->dev))
6834 goto reset_task_exit;
6835
6836 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6837 bnx2x_nic_load(bp, LOAD_NORMAL);
6838
6839reset_task_exit:
6840 rtnl_unlock();
6841}
6842
a2fbb9ea
ET
6843/* end of nic load/unload */
6844
6845/* ethtool_ops */
6846
6847/*
6848 * Init service functions
6849 */
6850
34f80b04
EG
6851static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6852{
6853 u32 val;
6854
6855 /* Check if there is any driver already loaded */
6856 val = REG_RD(bp, MISC_REG_UNPREPARED);
6857 if (val == 0x1) {
6858 /* Check if it is the UNDI driver
6859 * UNDI driver initializes CID offset for normal bell to 0x7
6860 */
4a37fb66 6861 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04 6862 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
76b190c5
EG
6863 if (val == 0x7)
6864 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
6865 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6866
34f80b04
EG
6867 if (val == 0x7) {
6868 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 6869 /* save our func */
34f80b04 6870 int func = BP_FUNC(bp);
da5a662a
VZ
6871 u32 swap_en;
6872 u32 swap_val;
34f80b04
EG
6873
6874 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6875
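			/* Recovery sequence: report a driver unload to the
			 * firmware for both ports, close the input traffic
			 * path, hard-reset every block except the NIG (whose
			 * port-swap strap is saved and restored), then send
			 * UNLOAD_DONE and restore our function context.
			 */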
6876 /* try unload UNDI on port 0 */
6877 bp->func = 0;
da5a662a
VZ
6878 bp->fw_seq =
6879 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6880 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 6881 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
6882
6883 /* if UNDI is loaded on the other port */
6884 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6885
da5a662a
VZ
6886 /* send "DONE" for previous unload */
6887 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6888
6889 /* unload UNDI on port 1 */
34f80b04 6890 bp->func = 1;
da5a662a
VZ
6891 bp->fw_seq =
6892 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6893 DRV_MSG_SEQ_NUMBER_MASK);
6894 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6895
6896 bnx2x_fw_command(bp, reset_code);
34f80b04
EG
6897 }
6898
da5a662a
VZ
6899 REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6900 HC_REG_CONFIG_0), 0x1000);
6901
6902 /* close input traffic and wait for it */
6903 /* Do not rcv packets to BRB */
6904 REG_WR(bp,
6905 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6906 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6907 /* Do not direct rcv packets that are not for MCP to
6908 * the BRB */
6909 REG_WR(bp,
6910 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6911 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6912 /* clear AEU */
6913 REG_WR(bp,
6914 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6915 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6916 msleep(10);
6917
6918 /* save NIG port swap info */
6919 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6920 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
6921 /* reset device */
6922 REG_WR(bp,
6923 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 6924 0xd3ffffff);
34f80b04
EG
6925 REG_WR(bp,
6926 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6927 0x1403);
da5a662a
VZ
6928 /* take the NIG out of reset and restore swap values */
6929 REG_WR(bp,
6930 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6931 MISC_REGISTERS_RESET_REG_1_RST_NIG);
6932 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6933 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6934
6935 /* send unload done to the MCP */
6936 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6937
6938 /* restore our func and fw_seq */
6939 bp->func = func;
6940 bp->fw_seq =
6941 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6942 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04
EG
6943 }
6944 }
6945}
6946
6947static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6948{
6949 u32 val, val2, val3, val4, id;
72ce58c3 6950 u16 pmc;
34f80b04
EG
6951
6952 /* Get the chip revision id and number. */
6953 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6954 val = REG_RD(bp, MISC_REG_CHIP_NUM);
6955 id = ((val & 0xffff) << 16);
6956 val = REG_RD(bp, MISC_REG_CHIP_REV);
6957 id |= ((val & 0xf) << 12);
6958 val = REG_RD(bp, MISC_REG_CHIP_METAL);
6959 id |= ((val & 0xff) << 4);
 6960	val = REG_RD(bp, MISC_REG_BOND_ID);
6961 id |= (val & 0xf);
6962 bp->common.chip_id = id;
6963 bp->link_params.chip_id = bp->common.chip_id;
6964 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
6965
6966 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6967 bp->common.flash_size = (NVRAM_1MB_SIZE <<
6968 (val & MCPR_NVM_CFG4_FLASH_SIZE));
6969 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6970 bp->common.flash_size, bp->common.flash_size);
6971
6972 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6973 bp->link_params.shmem_base = bp->common.shmem_base;
6974 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6975
6976 if (!bp->common.shmem_base ||
6977 (bp->common.shmem_base < 0xA0000) ||
6978 (bp->common.shmem_base >= 0xC0000)) {
6979 BNX2X_DEV_INFO("MCP not active\n");
6980 bp->flags |= NO_MCP_FLAG;
6981 return;
6982 }
6983
6984 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6985 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6986 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6987 BNX2X_ERR("BAD MCP validity signature\n");
6988
6989 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6990 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
6991
6992 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
6993 bp->common.hw_config, bp->common.board);
6994
6995 bp->link_params.hw_led_mode = ((bp->common.hw_config &
6996 SHARED_HW_CFG_LED_MODE_MASK) >>
6997 SHARED_HW_CFG_LED_MODE_SHIFT);
6998
6999 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7000 bp->common.bc_ver = val;
7001 BNX2X_DEV_INFO("bc_ver %X\n", val);
7002 if (val < BNX2X_BC_VER) {
 7003		/* for now only warn;
 7004		 * later we might need to enforce this */
7005 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7006 " please upgrade BC\n", BNX2X_BC_VER, val);
7007 }
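	/* bc_ver packs one byte each of major.minor.build, so the minimal
	 * BNX2X_BC_VER 0x040200 corresponds to bootcode 4.2.0 (this is
	 * also how bnx2x_get_drvinfo() formats it).
	 */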
72ce58c3
EG
7008
7009 if (BP_E1HVN(bp) == 0) {
7010 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7011 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7012 } else {
7013 /* no WOL capability for E1HVN != 0 */
7014 bp->flags |= NO_WOL_FLAG;
7015 }
7016 BNX2X_DEV_INFO("%sWoL capable\n",
7017 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
34f80b04
EG
7018
7019 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7020 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7021 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7022 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7023
7024 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7025 val, val2, val3, val4);
7026}
7027
7028static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7029 u32 switch_cfg)
a2fbb9ea 7030{
34f80b04 7031 int port = BP_PORT(bp);
a2fbb9ea
ET
7032 u32 ext_phy_type;
7033
a2fbb9ea
ET
7034 switch (switch_cfg) {
7035 case SWITCH_CFG_1G:
7036 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7037
c18487ee
YR
7038 ext_phy_type =
7039 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
7040 switch (ext_phy_type) {
7041 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7042 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7043 ext_phy_type);
7044
34f80b04
EG
7045 bp->port.supported |= (SUPPORTED_10baseT_Half |
7046 SUPPORTED_10baseT_Full |
7047 SUPPORTED_100baseT_Half |
7048 SUPPORTED_100baseT_Full |
7049 SUPPORTED_1000baseT_Full |
7050 SUPPORTED_2500baseX_Full |
7051 SUPPORTED_TP |
7052 SUPPORTED_FIBRE |
7053 SUPPORTED_Autoneg |
7054 SUPPORTED_Pause |
7055 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7056 break;
7057
7058 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7059 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7060 ext_phy_type);
7061
34f80b04
EG
7062 bp->port.supported |= (SUPPORTED_10baseT_Half |
7063 SUPPORTED_10baseT_Full |
7064 SUPPORTED_100baseT_Half |
7065 SUPPORTED_100baseT_Full |
7066 SUPPORTED_1000baseT_Full |
7067 SUPPORTED_TP |
7068 SUPPORTED_FIBRE |
7069 SUPPORTED_Autoneg |
7070 SUPPORTED_Pause |
7071 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7072 break;
7073
7074 default:
7075 BNX2X_ERR("NVRAM config error. "
7076 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 7077 bp->link_params.ext_phy_config);
a2fbb9ea
ET
7078 return;
7079 }
7080
34f80b04
EG
7081 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7082 port*0x10);
7083 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
7084 break;
7085
7086 case SWITCH_CFG_10G:
7087 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7088
c18487ee
YR
7089 ext_phy_type =
7090 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
7091 switch (ext_phy_type) {
7092 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7093 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7094 ext_phy_type);
7095
34f80b04
EG
7096 bp->port.supported |= (SUPPORTED_10baseT_Half |
7097 SUPPORTED_10baseT_Full |
7098 SUPPORTED_100baseT_Half |
7099 SUPPORTED_100baseT_Full |
7100 SUPPORTED_1000baseT_Full |
7101 SUPPORTED_2500baseX_Full |
7102 SUPPORTED_10000baseT_Full |
7103 SUPPORTED_TP |
7104 SUPPORTED_FIBRE |
7105 SUPPORTED_Autoneg |
7106 SUPPORTED_Pause |
7107 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7108 break;
7109
7110 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
f1410647 7111 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
34f80b04 7112 ext_phy_type);
f1410647 7113
34f80b04
EG
7114 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7115 SUPPORTED_FIBRE |
7116 SUPPORTED_Pause |
7117 SUPPORTED_Asym_Pause);
f1410647
ET
7118 break;
7119
a2fbb9ea 7120 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
f1410647
ET
7121 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7122 ext_phy_type);
7123
34f80b04
EG
7124 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7125 SUPPORTED_1000baseT_Full |
7126 SUPPORTED_FIBRE |
7127 SUPPORTED_Pause |
7128 SUPPORTED_Asym_Pause);
f1410647
ET
7129 break;
7130
7131 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7132 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
a2fbb9ea
ET
7133 ext_phy_type);
7134
34f80b04
EG
7135 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7136 SUPPORTED_1000baseT_Full |
7137 SUPPORTED_FIBRE |
7138 SUPPORTED_Autoneg |
7139 SUPPORTED_Pause |
7140 SUPPORTED_Asym_Pause);
f1410647
ET
7141 break;
7142
c18487ee
YR
7143 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7144 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7145 ext_phy_type);
7146
34f80b04
EG
7147 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7148 SUPPORTED_2500baseX_Full |
7149 SUPPORTED_1000baseT_Full |
7150 SUPPORTED_FIBRE |
7151 SUPPORTED_Autoneg |
7152 SUPPORTED_Pause |
7153 SUPPORTED_Asym_Pause);
c18487ee
YR
7154 break;
7155
f1410647
ET
7156 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7157 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7158 ext_phy_type);
7159
34f80b04
EG
7160 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7161 SUPPORTED_TP |
7162 SUPPORTED_Autoneg |
7163 SUPPORTED_Pause |
7164 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7165 break;
7166
c18487ee
YR
7167 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7168 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7169 bp->link_params.ext_phy_config);
7170 break;
7171
a2fbb9ea
ET
7172 default:
7173 BNX2X_ERR("NVRAM config error. "
7174 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 7175 bp->link_params.ext_phy_config);
a2fbb9ea
ET
7176 return;
7177 }
7178
34f80b04
EG
7179 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7180 port*0x18);
7181 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 7182
a2fbb9ea
ET
7183 break;
7184
7185 default:
7186 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 7187 bp->port.link_config);
a2fbb9ea
ET
7188 return;
7189 }
34f80b04 7190 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea
ET
7191
7192 /* mask what we support according to speed_cap_mask */
c18487ee
YR
7193 if (!(bp->link_params.speed_cap_mask &
7194 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 7195 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 7196
c18487ee
YR
7197 if (!(bp->link_params.speed_cap_mask &
7198 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 7199 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 7200
c18487ee
YR
7201 if (!(bp->link_params.speed_cap_mask &
7202 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 7203 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 7204
c18487ee
YR
7205 if (!(bp->link_params.speed_cap_mask &
7206 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 7207 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 7208
c18487ee
YR
7209 if (!(bp->link_params.speed_cap_mask &
7210 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
7211 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7212 SUPPORTED_1000baseT_Full);
a2fbb9ea 7213
c18487ee
YR
7214 if (!(bp->link_params.speed_cap_mask &
7215 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 7216 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 7217
c18487ee
YR
7218 if (!(bp->link_params.speed_cap_mask &
7219 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 7220 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 7221
34f80b04 7222 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
7223}
7224
34f80b04 7225static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 7226{
c18487ee 7227 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 7228
34f80b04 7229 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 7230 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 7231 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 7232 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7233 bp->port.advertising = bp->port.supported;
a2fbb9ea 7234 } else {
c18487ee
YR
7235 u32 ext_phy_type =
7236 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7237
7238 if ((ext_phy_type ==
7239 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7240 (ext_phy_type ==
7241 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 7242 /* force 10G, no AN */
c18487ee 7243 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 7244 bp->port.advertising =
a2fbb9ea
ET
7245 (ADVERTISED_10000baseT_Full |
7246 ADVERTISED_FIBRE);
7247 break;
7248 }
7249 BNX2X_ERR("NVRAM config error. "
7250 "Invalid link_config 0x%x"
7251 " Autoneg not supported\n",
34f80b04 7252 bp->port.link_config);
a2fbb9ea
ET
7253 return;
7254 }
7255 break;
7256
7257 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 7258 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 7259 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
7260 bp->port.advertising = (ADVERTISED_10baseT_Full |
7261 ADVERTISED_TP);
a2fbb9ea
ET
7262 } else {
7263 BNX2X_ERR("NVRAM config error. "
7264 "Invalid link_config 0x%x"
7265 " speed_cap_mask 0x%x\n",
34f80b04 7266 bp->port.link_config,
c18487ee 7267 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7268 return;
7269 }
7270 break;
7271
7272 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 7273 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
7274 bp->link_params.req_line_speed = SPEED_10;
7275 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7276 bp->port.advertising = (ADVERTISED_10baseT_Half |
7277 ADVERTISED_TP);
a2fbb9ea
ET
7278 } else {
7279 BNX2X_ERR("NVRAM config error. "
7280 "Invalid link_config 0x%x"
7281 " speed_cap_mask 0x%x\n",
34f80b04 7282 bp->port.link_config,
c18487ee 7283 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7284 return;
7285 }
7286 break;
7287
7288 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 7289 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 7290 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
7291 bp->port.advertising = (ADVERTISED_100baseT_Full |
7292 ADVERTISED_TP);
a2fbb9ea
ET
7293 } else {
7294 BNX2X_ERR("NVRAM config error. "
7295 "Invalid link_config 0x%x"
7296 " speed_cap_mask 0x%x\n",
34f80b04 7297 bp->port.link_config,
c18487ee 7298 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7299 return;
7300 }
7301 break;
7302
7303 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 7304 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
7305 bp->link_params.req_line_speed = SPEED_100;
7306 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7307 bp->port.advertising = (ADVERTISED_100baseT_Half |
7308 ADVERTISED_TP);
a2fbb9ea
ET
7309 } else {
7310 BNX2X_ERR("NVRAM config error. "
7311 "Invalid link_config 0x%x"
7312 " speed_cap_mask 0x%x\n",
34f80b04 7313 bp->port.link_config,
c18487ee 7314 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7315 return;
7316 }
7317 break;
7318
7319 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 7320 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 7321 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
7322 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7323 ADVERTISED_TP);
a2fbb9ea
ET
7324 } else {
7325 BNX2X_ERR("NVRAM config error. "
7326 "Invalid link_config 0x%x"
7327 " speed_cap_mask 0x%x\n",
34f80b04 7328 bp->port.link_config,
c18487ee 7329 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7330 return;
7331 }
7332 break;
7333
7334 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 7335 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 7336 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
7337 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7338 ADVERTISED_TP);
a2fbb9ea
ET
7339 } else {
7340 BNX2X_ERR("NVRAM config error. "
7341 "Invalid link_config 0x%x"
7342 " speed_cap_mask 0x%x\n",
34f80b04 7343 bp->port.link_config,
c18487ee 7344 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7345 return;
7346 }
7347 break;
7348
7349 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7350 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7351 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 7352 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 7353 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
7354 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7355 ADVERTISED_FIBRE);
a2fbb9ea
ET
7356 } else {
7357 BNX2X_ERR("NVRAM config error. "
7358 "Invalid link_config 0x%x"
7359 " speed_cap_mask 0x%x\n",
34f80b04 7360 bp->port.link_config,
c18487ee 7361 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7362 return;
7363 }
7364 break;
7365
7366 default:
7367 BNX2X_ERR("NVRAM config error. "
7368 "BAD link speed link_config 0x%x\n",
34f80b04 7369 bp->port.link_config);
c18487ee 7370 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7371 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
7372 break;
7373 }
a2fbb9ea 7374
34f80b04
EG
7375 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7376 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 7377 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 7378 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 7379 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 7380
c18487ee 7381 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 7382 " advertising 0x%x\n",
c18487ee
YR
7383 bp->link_params.req_line_speed,
7384 bp->link_params.req_duplex,
34f80b04 7385 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
7386}
7387
34f80b04 7388static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 7389{
34f80b04
EG
7390 int port = BP_PORT(bp);
7391 u32 val, val2;
a2fbb9ea 7392
c18487ee 7393 bp->link_params.bp = bp;
34f80b04 7394 bp->link_params.port = port;
c18487ee 7395
c18487ee 7396 bp->link_params.serdes_config =
f1410647 7397 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
c18487ee 7398 bp->link_params.lane_config =
a2fbb9ea 7399 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 7400 bp->link_params.ext_phy_config =
a2fbb9ea
ET
7401 SHMEM_RD(bp,
7402 dev_info.port_hw_config[port].external_phy_config);
c18487ee 7403 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
7404 SHMEM_RD(bp,
7405 dev_info.port_hw_config[port].speed_capability_mask);
7406
34f80b04 7407 bp->port.link_config =
a2fbb9ea
ET
7408 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7409
34f80b04
EG
7410 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7411 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7412 " link_config 0x%08x\n",
c18487ee
YR
7413 bp->link_params.serdes_config,
7414 bp->link_params.lane_config,
7415 bp->link_params.ext_phy_config,
34f80b04 7416 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 7417
34f80b04 7418 bp->link_params.switch_cfg = (bp->port.link_config &
c18487ee
YR
7419 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7420 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
7421
7422 bnx2x_link_settings_requested(bp);
7423
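	/* shmem keeps the MAC as two words: mac_upper carries bytes 0-1 in
	 * its low 16 bits and mac_lower carries bytes 2-5 */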
7424 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7425 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7426 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7427 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7428 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7429 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7430 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7431 bp->dev->dev_addr[5] = (u8)(val & 0xff);
c18487ee
YR
7432 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7433 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
34f80b04
EG
7434}
7435
7436static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7437{
7438 int func = BP_FUNC(bp);
7439 u32 val, val2;
7440 int rc = 0;
a2fbb9ea 7441
34f80b04 7442 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 7443
34f80b04
EG
7444 bp->e1hov = 0;
7445 bp->e1hmf = 0;
7446 if (CHIP_IS_E1H(bp)) {
7447 bp->mf_config =
7448 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 7449
3196a88a
EG
7450 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7451 FUNC_MF_CFG_E1HOV_TAG_MASK);
34f80b04 7452 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
a2fbb9ea 7453
34f80b04
EG
7454 bp->e1hov = val;
7455 bp->e1hmf = 1;
7456 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7457 "(0x%04x)\n",
7458 func, bp->e1hov, bp->e1hov);
7459 } else {
7460 BNX2X_DEV_INFO("Single function mode\n");
7461 if (BP_E1HVN(bp)) {
7462 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7463 " aborting\n", func);
7464 rc = -EPERM;
7465 }
7466 }
7467 }
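	/* A valid outer-VLAN tag (E1HOV) in the MF configuration is what
	 * marks the function as multi-function; without one, only VN 0
	 * may run, in single function mode.
	 */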
a2fbb9ea 7468
34f80b04
EG
7469 if (!BP_NOMCP(bp)) {
7470 bnx2x_get_port_hwinfo(bp);
7471
7472 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7473 DRV_MSG_SEQ_NUMBER_MASK);
7474 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7475 }
7476
7477 if (IS_E1HMF(bp)) {
7478 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7479 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7480 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7481 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7482 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7483 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7484 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7485 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7486 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7487 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7488 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7489 ETH_ALEN);
7490 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7491 ETH_ALEN);
a2fbb9ea 7492 }
34f80b04
EG
7493
7494 return rc;
a2fbb9ea
ET
7495 }
7496
34f80b04
EG
7497 if (BP_NOMCP(bp)) {
7498 /* only supposed to happen on emulation/FPGA */
33471629 7499		BNX2X_ERR("warning: random MAC workaround active\n");
34f80b04
EG
7500 random_ether_addr(bp->dev->dev_addr);
7501 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7502 }
a2fbb9ea 7503
34f80b04
EG
7504 return rc;
7505}
7506
7507static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7508{
7509 int func = BP_FUNC(bp);
7510 int rc;
7511
da5a662a
VZ
7512 /* Disable interrupt handling until HW is initialized */
7513 atomic_set(&bp->intr_sem, 1);
7514
34f80b04 7515 mutex_init(&bp->port.phy_mutex);
a2fbb9ea 7516
1cf167f2 7517 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
34f80b04
EG
7518 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7519
7520 rc = bnx2x_get_hwinfo(bp);
7521
7522 /* need to reset chip if undi was active */
7523 if (!BP_NOMCP(bp))
7524 bnx2x_undi_unload(bp);
7525
7526 if (CHIP_REV_IS_FPGA(bp))
7527 printk(KERN_ERR PFX "FPGA detected\n");
7528
7529 if (BP_NOMCP(bp) && (func == 0))
7530 printk(KERN_ERR PFX
7531 "MCP disabled, must load devices in order!\n");
7532
7a9b2557
VZ
7533 /* Set TPA flags */
7534 if (disable_tpa) {
7535 bp->flags &= ~TPA_ENABLE_FLAG;
7536 bp->dev->features &= ~NETIF_F_LRO;
7537 } else {
7538 bp->flags |= TPA_ENABLE_FLAG;
7539 bp->dev->features |= NETIF_F_LRO;
7540 }
7541
7542
34f80b04
EG
7543 bp->tx_ring_size = MAX_TX_AVAIL;
7544 bp->rx_ring_size = MAX_RX_AVAIL;
7545
7546 bp->rx_csum = 1;
7547 bp->rx_offset = 0;
7548
7549 bp->tx_ticks = 50;
7550 bp->rx_ticks = 25;
7551
34f80b04
EG
7552 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7553 bp->current_interval = (poll ? poll : bp->timer_interval);
7554
7555 init_timer(&bp->timer);
7556 bp->timer.expires = jiffies + bp->current_interval;
7557 bp->timer.data = (unsigned long) bp;
7558 bp->timer.function = bnx2x_timer;
7559
7560 return rc;
a2fbb9ea
ET
7561}
7562
7563/*
7564 * ethtool service functions
7565 */
7566
7567/* All ethtool functions called with rtnl_lock */
7568
7569static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7570{
7571 struct bnx2x *bp = netdev_priv(dev);
7572
34f80b04
EG
7573 cmd->supported = bp->port.supported;
7574 cmd->advertising = bp->port.advertising;
a2fbb9ea
ET
7575
7576 if (netif_carrier_ok(dev)) {
c18487ee
YR
7577 cmd->speed = bp->link_vars.line_speed;
7578 cmd->duplex = bp->link_vars.duplex;
a2fbb9ea 7579 } else {
c18487ee
YR
7580 cmd->speed = bp->link_params.req_line_speed;
7581 cmd->duplex = bp->link_params.req_duplex;
a2fbb9ea 7582 }
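	/* In MF mode the reported speed is capped at the function's
	 * configured maximum bandwidth; the MAX_BW field is kept in
	 * units of 100 Mbps. */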
34f80b04
EG
7583 if (IS_E1HMF(bp)) {
7584 u16 vn_max_rate;
7585
7586 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7587 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7588 if (vn_max_rate < cmd->speed)
7589 cmd->speed = vn_max_rate;
7590 }
a2fbb9ea 7591
c18487ee
YR
7592 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7593 u32 ext_phy_type =
7594 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
7595
7596 switch (ext_phy_type) {
7597 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7598 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7599 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7600 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 7601 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
f1410647
ET
7602 cmd->port = PORT_FIBRE;
7603 break;
7604
7605 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7606 cmd->port = PORT_TP;
7607 break;
7608
c18487ee
YR
7609 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7610 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7611 bp->link_params.ext_phy_config);
7612 break;
7613
f1410647
ET
7614 default:
7615 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
7616 bp->link_params.ext_phy_config);
7617 break;
f1410647
ET
7618 }
7619 } else
a2fbb9ea 7620 cmd->port = PORT_TP;
a2fbb9ea 7621
34f80b04 7622 cmd->phy_address = bp->port.phy_addr;
a2fbb9ea
ET
7623 cmd->transceiver = XCVR_INTERNAL;
7624
c18487ee 7625 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 7626 cmd->autoneg = AUTONEG_ENABLE;
f1410647 7627 else
a2fbb9ea 7628 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
7629
7630 cmd->maxtxpkt = 0;
7631 cmd->maxrxpkt = 0;
7632
7633 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7634 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7635 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7636 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7637 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7638 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7639 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7640
7641 return 0;
7642}
7643
7644static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7645{
7646 struct bnx2x *bp = netdev_priv(dev);
7647 u32 advertising;
7648
34f80b04
EG
7649 if (IS_E1HMF(bp))
7650 return 0;
7651
a2fbb9ea
ET
7652 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7653 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7654 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7655 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7656 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7657 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7658 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7659
a2fbb9ea 7660 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
7661 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7662 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 7663 return -EINVAL;
f1410647 7664 }
a2fbb9ea
ET
7665
7666 /* advertise the requested speed and duplex if supported */
34f80b04 7667 cmd->advertising &= bp->port.supported;
a2fbb9ea 7668
c18487ee
YR
7669 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7670 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
7671 bp->port.advertising |= (ADVERTISED_Autoneg |
7672 cmd->advertising);
a2fbb9ea
ET
7673
7674 } else { /* forced speed */
7675 /* advertise the requested speed and duplex if supported */
7676 switch (cmd->speed) {
7677 case SPEED_10:
7678 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 7679 if (!(bp->port.supported &
f1410647
ET
7680 SUPPORTED_10baseT_Full)) {
7681 DP(NETIF_MSG_LINK,
7682 "10M full not supported\n");
a2fbb9ea 7683 return -EINVAL;
f1410647 7684 }
a2fbb9ea
ET
7685
7686 advertising = (ADVERTISED_10baseT_Full |
7687 ADVERTISED_TP);
7688 } else {
34f80b04 7689 if (!(bp->port.supported &
f1410647
ET
7690 SUPPORTED_10baseT_Half)) {
7691 DP(NETIF_MSG_LINK,
7692 "10M half not supported\n");
a2fbb9ea 7693 return -EINVAL;
f1410647 7694 }
a2fbb9ea
ET
7695
7696 advertising = (ADVERTISED_10baseT_Half |
7697 ADVERTISED_TP);
7698 }
7699 break;
7700
7701 case SPEED_100:
7702 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 7703 if (!(bp->port.supported &
f1410647
ET
7704 SUPPORTED_100baseT_Full)) {
7705 DP(NETIF_MSG_LINK,
7706 "100M full not supported\n");
a2fbb9ea 7707 return -EINVAL;
f1410647 7708 }
a2fbb9ea
ET
7709
7710 advertising = (ADVERTISED_100baseT_Full |
7711 ADVERTISED_TP);
7712 } else {
34f80b04 7713 if (!(bp->port.supported &
f1410647
ET
7714 SUPPORTED_100baseT_Half)) {
7715 DP(NETIF_MSG_LINK,
7716 "100M half not supported\n");
a2fbb9ea 7717 return -EINVAL;
f1410647 7718 }
a2fbb9ea
ET
7719
7720 advertising = (ADVERTISED_100baseT_Half |
7721 ADVERTISED_TP);
7722 }
7723 break;
7724
7725 case SPEED_1000:
f1410647
ET
7726 if (cmd->duplex != DUPLEX_FULL) {
7727 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 7728 return -EINVAL;
f1410647 7729 }
a2fbb9ea 7730
34f80b04 7731 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 7732 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 7733 return -EINVAL;
f1410647 7734 }
a2fbb9ea
ET
7735
7736 advertising = (ADVERTISED_1000baseT_Full |
7737 ADVERTISED_TP);
7738 break;
7739
7740 case SPEED_2500:
f1410647
ET
7741 if (cmd->duplex != DUPLEX_FULL) {
7742 DP(NETIF_MSG_LINK,
7743 "2.5G half not supported\n");
a2fbb9ea 7744 return -EINVAL;
f1410647 7745 }
a2fbb9ea 7746
34f80b04 7747 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
f1410647
ET
7748 DP(NETIF_MSG_LINK,
7749 "2.5G full not supported\n");
a2fbb9ea 7750 return -EINVAL;
f1410647 7751 }
a2fbb9ea 7752
f1410647 7753 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
7754 ADVERTISED_TP);
7755 break;
7756
7757 case SPEED_10000:
f1410647
ET
7758 if (cmd->duplex != DUPLEX_FULL) {
7759 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 7760 return -EINVAL;
f1410647 7761 }
a2fbb9ea 7762
34f80b04 7763 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 7764 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 7765 return -EINVAL;
f1410647 7766 }
a2fbb9ea
ET
7767
7768 advertising = (ADVERTISED_10000baseT_Full |
7769 ADVERTISED_FIBRE);
7770 break;
7771
7772 default:
f1410647 7773 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
7774 return -EINVAL;
7775 }
7776
c18487ee
YR
7777 bp->link_params.req_line_speed = cmd->speed;
7778 bp->link_params.req_duplex = cmd->duplex;
34f80b04 7779 bp->port.advertising = advertising;
a2fbb9ea
ET
7780 }
7781
c18487ee 7782 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 7783 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 7784 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 7785 bp->port.advertising);
a2fbb9ea 7786
34f80b04 7787 if (netif_running(dev)) {
bb2a0f7a 7788 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
7789 bnx2x_link_set(bp);
7790 }
a2fbb9ea
ET
7791
7792 return 0;
7793}
7794
c18487ee
YR
7795#define PHY_FW_VER_LEN 10
7796
a2fbb9ea
ET
7797static void bnx2x_get_drvinfo(struct net_device *dev,
7798 struct ethtool_drvinfo *info)
7799{
7800 struct bnx2x *bp = netdev_priv(dev);
f0e53a84 7801 u8 phy_fw_ver[PHY_FW_VER_LEN];
a2fbb9ea
ET
7802
7803 strcpy(info->driver, DRV_MODULE_NAME);
7804 strcpy(info->version, DRV_MODULE_VERSION);
c18487ee
YR
7805
7806 phy_fw_ver[0] = '\0';
34f80b04 7807 if (bp->port.pmf) {
4a37fb66 7808 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
7809 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7810 (bp->state != BNX2X_STATE_CLOSED),
7811 phy_fw_ver, PHY_FW_VER_LEN);
4a37fb66 7812 bnx2x_release_phy_lock(bp);
34f80b04 7813 }
c18487ee 7814
f0e53a84
EG
7815 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
7816 (bp->common.bc_ver & 0xff0000) >> 16,
7817 (bp->common.bc_ver & 0xff00) >> 8,
7818 (bp->common.bc_ver & 0xff),
7819 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
a2fbb9ea
ET
7820 strcpy(info->bus_info, pci_name(bp->pdev));
7821 info->n_stats = BNX2X_NUM_STATS;
7822 info->testinfo_len = BNX2X_NUM_TESTS;
34f80b04 7823 info->eedump_len = bp->common.flash_size;
a2fbb9ea
ET
7824 info->regdump_len = 0;
7825}
7826
7827static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7828{
7829 struct bnx2x *bp = netdev_priv(dev);
7830
7831 if (bp->flags & NO_WOL_FLAG) {
7832 wol->supported = 0;
7833 wol->wolopts = 0;
7834 } else {
7835 wol->supported = WAKE_MAGIC;
7836 if (bp->wol)
7837 wol->wolopts = WAKE_MAGIC;
7838 else
7839 wol->wolopts = 0;
7840 }
7841 memset(&wol->sopass, 0, sizeof(wol->sopass));
7842}
7843
7844static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7845{
7846 struct bnx2x *bp = netdev_priv(dev);
7847
7848 if (wol->wolopts & ~WAKE_MAGIC)
7849 return -EINVAL;
7850
7851 if (wol->wolopts & WAKE_MAGIC) {
7852 if (bp->flags & NO_WOL_FLAG)
7853 return -EINVAL;
7854
7855 bp->wol = 1;
34f80b04 7856 } else
a2fbb9ea 7857 bp->wol = 0;
34f80b04 7858
a2fbb9ea
ET
7859 return 0;
7860}
7861
7862static u32 bnx2x_get_msglevel(struct net_device *dev)
7863{
7864 struct bnx2x *bp = netdev_priv(dev);
7865
7866 return bp->msglevel;
7867}
7868
7869static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7870{
7871 struct bnx2x *bp = netdev_priv(dev);
7872
7873 if (capable(CAP_NET_ADMIN))
7874 bp->msglevel = level;
7875}
7876
7877static int bnx2x_nway_reset(struct net_device *dev)
7878{
7879 struct bnx2x *bp = netdev_priv(dev);
7880
34f80b04
EG
7881 if (!bp->port.pmf)
7882 return 0;
a2fbb9ea 7883
34f80b04 7884 if (netif_running(dev)) {
bb2a0f7a 7885 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
7886 bnx2x_link_set(bp);
7887 }
a2fbb9ea
ET
7888
7889 return 0;
7890}
7891
7892static int bnx2x_get_eeprom_len(struct net_device *dev)
7893{
7894 struct bnx2x *bp = netdev_priv(dev);
7895
34f80b04 7896 return bp->common.flash_size;
a2fbb9ea
ET
7897}
7898
7899static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7900{
34f80b04 7901 int port = BP_PORT(bp);
a2fbb9ea
ET
7902 int count, i;
7903 u32 val = 0;
7904
7905 /* adjust timeout for emulation/FPGA */
7906 count = NVRAM_TIMEOUT_COUNT;
7907 if (CHIP_REV_IS_SLOW(bp))
7908 count *= 100;
7909
7910 /* request access to nvram interface */
7911 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7912 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7913
7914 for (i = 0; i < count*10; i++) {
7915 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7916 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7917 break;
7918
7919 udelay(5);
7920 }
7921
7922 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 7923 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
a2fbb9ea
ET
7924 return -EBUSY;
7925 }
7926
7927 return 0;
7928}
7929
7930static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7931{
34f80b04 7932 int port = BP_PORT(bp);
a2fbb9ea
ET
7933 int count, i;
7934 u32 val = 0;
7935
7936 /* adjust timeout for emulation/FPGA */
7937 count = NVRAM_TIMEOUT_COUNT;
7938 if (CHIP_REV_IS_SLOW(bp))
7939 count *= 100;
7940
7941 /* relinquish nvram interface */
7942 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7943 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7944
7945 for (i = 0; i < count*10; i++) {
7946 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7947 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7948 break;
7949
7950 udelay(5);
7951 }
7952
7953 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 7954 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
a2fbb9ea
ET
7955 return -EBUSY;
7956 }
7957
7958 return 0;
7959}
7960
7961static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7962{
7963 u32 val;
7964
7965 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7966
7967 /* enable both bits, even on read */
7968 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7969 (val | MCPR_NVM_ACCESS_ENABLE_EN |
7970 MCPR_NVM_ACCESS_ENABLE_WR_EN));
7971}
7972
7973static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7974{
7975 u32 val;
7976
7977 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7978
7979 /* disable both bits, even after read */
7980 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7981 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7982 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7983}
7984
7985static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7986 u32 cmd_flags)
7987{
f1410647 7988 int count, i, rc;
a2fbb9ea
ET
7989 u32 val;
7990
7991 /* build the command word */
7992 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
7993
7994 /* need to clear DONE bit separately */
7995 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7996
7997 /* address of the NVRAM to read from */
7998 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7999 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8000
8001 /* issue a read command */
8002 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8003
8004 /* adjust timeout for emulation/FPGA */
8005 count = NVRAM_TIMEOUT_COUNT;
8006 if (CHIP_REV_IS_SLOW(bp))
8007 count *= 100;
8008
8009 /* wait for completion */
8010 *ret_val = 0;
8011 rc = -EBUSY;
8012 for (i = 0; i < count; i++) {
8013 udelay(5);
8014 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8015
8016 if (val & MCPR_NVM_COMMAND_DONE) {
8017 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
a2fbb9ea
ET
 8018			/* we read nvram data in cpu order,
 8019			 * but ethtool sees it as an array of bytes;
 8020			 * converting to big-endian does the work */
8021 val = cpu_to_be32(val);
8022 *ret_val = val;
8023 rc = 0;
8024 break;
8025 }
8026 }
8027
8028 return rc;
8029}
8030
8031static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8032 int buf_size)
8033{
8034 int rc;
8035 u32 cmd_flags;
8036 u32 val;
8037
8038 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8039 DP(BNX2X_MSG_NVM,
c14423fe 8040 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
8041 offset, buf_size);
8042 return -EINVAL;
8043 }
8044
34f80b04
EG
8045 if (offset + buf_size > bp->common.flash_size) {
8046 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8047 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8048 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8049 return -EINVAL;
8050 }
8051
8052 /* request access to nvram interface */
8053 rc = bnx2x_acquire_nvram_lock(bp);
8054 if (rc)
8055 return rc;
8056
8057 /* enable access to nvram interface */
8058 bnx2x_enable_nvram_access(bp);
8059
8060 /* read the first word(s) */
8061 cmd_flags = MCPR_NVM_COMMAND_FIRST;
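	/* a burst is framed by the FIRST flag on its first dword and the
	 * LAST flag on its final one; dwords in between carry no flags */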
8062 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8063 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8064 memcpy(ret_buf, &val, 4);
8065
8066 /* advance to the next dword */
8067 offset += sizeof(u32);
8068 ret_buf += sizeof(u32);
8069 buf_size -= sizeof(u32);
8070 cmd_flags = 0;
8071 }
8072
8073 if (rc == 0) {
8074 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8075 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8076 memcpy(ret_buf, &val, 4);
8077 }
8078
8079 /* disable access to nvram interface */
8080 bnx2x_disable_nvram_access(bp);
8081 bnx2x_release_nvram_lock(bp);
8082
8083 return rc;
8084}
8085
8086static int bnx2x_get_eeprom(struct net_device *dev,
8087 struct ethtool_eeprom *eeprom, u8 *eebuf)
8088{
8089 struct bnx2x *bp = netdev_priv(dev);
8090 int rc;
8091
34f80b04 8092 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
8093 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8094 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8095 eeprom->len, eeprom->len);
8096
8097 /* parameters already validated in ethtool_get_eeprom */
8098
8099 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8100
8101 return rc;
8102}
8103
8104static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8105 u32 cmd_flags)
8106{
f1410647 8107 int count, i, rc;
a2fbb9ea
ET
8108
8109 /* build the command word */
8110 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8111
8112 /* need to clear DONE bit separately */
8113 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8114
8115 /* write the data */
8116 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8117
8118 /* address of the NVRAM to write to */
8119 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8120 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8121
8122 /* issue the write command */
8123 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8124
8125 /* adjust timeout for emulation/FPGA */
8126 count = NVRAM_TIMEOUT_COUNT;
8127 if (CHIP_REV_IS_SLOW(bp))
8128 count *= 100;
8129
8130 /* wait for completion */
8131 rc = -EBUSY;
8132 for (i = 0; i < count; i++) {
8133 udelay(5);
8134 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8135 if (val & MCPR_NVM_COMMAND_DONE) {
8136 rc = 0;
8137 break;
8138 }
8139 }
8140
8141 return rc;
8142}
8143
f1410647 8144#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
a2fbb9ea
ET
8145
8146static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8147 int buf_size)
8148{
8149 int rc;
8150 u32 cmd_flags;
8151 u32 align_offset;
8152 u32 val;
8153
34f80b04
EG
8154 if (offset + buf_size > bp->common.flash_size) {
8155 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8156 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8157 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8158 return -EINVAL;
8159 }
8160
8161 /* request access to nvram interface */
8162 rc = bnx2x_acquire_nvram_lock(bp);
8163 if (rc)
8164 return rc;
8165
8166 /* enable access to nvram interface */
8167 bnx2x_enable_nvram_access(bp);
8168
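	/* single byte write: read the containing dword, patch the target
	 * byte in place (BYTE_OFFSET yields its bit position) and write
	 * the whole dword back */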
8169 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8170 align_offset = (offset & ~0x03);
8171 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8172
8173 if (rc == 0) {
8174 val &= ~(0xff << BYTE_OFFSET(offset));
8175 val |= (*data_buf << BYTE_OFFSET(offset));
8176
 8177		/* nvram data is returned as an array of bytes;
 8178		 * convert it back to cpu order */
8179 val = be32_to_cpu(val);
8180
a2fbb9ea
ET
8181 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8182 cmd_flags);
8183 }
8184
8185 /* disable access to nvram interface */
8186 bnx2x_disable_nvram_access(bp);
8187 bnx2x_release_nvram_lock(bp);
8188
8189 return rc;
8190}
8191
8192static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8193 int buf_size)
8194{
8195 int rc;
8196 u32 cmd_flags;
8197 u32 val;
8198 u32 written_so_far;
8199
34f80b04 8200 if (buf_size == 1) /* ethtool */
a2fbb9ea 8201 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
a2fbb9ea
ET
8202
8203 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8204 DP(BNX2X_MSG_NVM,
c14423fe 8205 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
8206 offset, buf_size);
8207 return -EINVAL;
8208 }
8209
34f80b04
EG
8210 if (offset + buf_size > bp->common.flash_size) {
8211 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8212 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8213 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8214 return -EINVAL;
8215 }
8216
8217 /* request access to nvram interface */
8218 rc = bnx2x_acquire_nvram_lock(bp);
8219 if (rc)
8220 return rc;
8221
8222 /* enable access to nvram interface */
8223 bnx2x_enable_nvram_access(bp);
8224
8225 written_so_far = 0;
8226 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8227 while ((written_so_far < buf_size) && (rc == 0)) {
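		/* a programming burst must not cross an NVRAM page: LAST is
		 * raised at the end of the buffer or of a page, FIRST again
		 * at the start of each new page */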
8228 if (written_so_far == (buf_size - sizeof(u32)))
8229 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8230 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8231 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8232 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8233 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8234
8235 memcpy(&val, data_buf, 4);
a2fbb9ea
ET
8236
8237 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8238
8239 /* advance to the next dword */
8240 offset += sizeof(u32);
8241 data_buf += sizeof(u32);
8242 written_so_far += sizeof(u32);
8243 cmd_flags = 0;
8244 }
8245
8246 /* disable access to nvram interface */
8247 bnx2x_disable_nvram_access(bp);
8248 bnx2x_release_nvram_lock(bp);
8249
8250 return rc;
8251}
8252
8253static int bnx2x_set_eeprom(struct net_device *dev,
8254 struct ethtool_eeprom *eeprom, u8 *eebuf)
8255{
8256 struct bnx2x *bp = netdev_priv(dev);
8257 int rc;
8258
9f4c9583
EG
8259 if (!netif_running(dev))
8260 return -EAGAIN;
8261
34f80b04 8262 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
8263 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8264 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8265 eeprom->len, eeprom->len);
8266
8267 /* parameters already validated in ethtool_set_eeprom */
8268
c18487ee 8269	/* If the magic number is PHY (0x00504859, ASCII "PHY") upgrade the PHY FW */
34f80b04
EG
8270 if (eeprom->magic == 0x00504859)
8271 if (bp->port.pmf) {
8272
4a37fb66 8273 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
8274 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8275 bp->link_params.ext_phy_config,
8276 (bp->state != BNX2X_STATE_CLOSED),
8277 eebuf, eeprom->len);
bb2a0f7a
YG
8278 if ((bp->state == BNX2X_STATE_OPEN) ||
8279 (bp->state == BNX2X_STATE_DISABLED)) {
34f80b04
EG
8280 rc |= bnx2x_link_reset(&bp->link_params,
8281 &bp->link_vars);
8282 rc |= bnx2x_phy_init(&bp->link_params,
8283 &bp->link_vars);
bb2a0f7a 8284 }
4a37fb66 8285 bnx2x_release_phy_lock(bp);
34f80b04
EG
8286
8287 } else /* Only the PMF can access the PHY */
8288 return -EINVAL;
8289 else
c18487ee 8290 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
a2fbb9ea
ET
8291
8292 return rc;
8293}
8294
8295static int bnx2x_get_coalesce(struct net_device *dev,
8296 struct ethtool_coalesce *coal)
8297{
8298 struct bnx2x *bp = netdev_priv(dev);
8299
8300 memset(coal, 0, sizeof(struct ethtool_coalesce));
8301
8302 coal->rx_coalesce_usecs = bp->rx_ticks;
8303 coal->tx_coalesce_usecs = bp->tx_ticks;
a2fbb9ea
ET
8304
8305 return 0;
8306}
8307
8308static int bnx2x_set_coalesce(struct net_device *dev,
8309 struct ethtool_coalesce *coal)
8310{
8311 struct bnx2x *bp = netdev_priv(dev);
8312
8313 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8314 if (bp->rx_ticks > 3000)
8315 bp->rx_ticks = 3000;
8316
8317 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8318 if (bp->tx_ticks > 0x3000)
8319 bp->tx_ticks = 0x3000;
8320
34f80b04 8321 if (netif_running(dev))
a2fbb9ea
ET
8322 bnx2x_update_coalesce(bp);
8323
8324 return 0;
8325}
8326
8327static void bnx2x_get_ringparam(struct net_device *dev,
8328 struct ethtool_ringparam *ering)
8329{
8330 struct bnx2x *bp = netdev_priv(dev);
8331
8332 ering->rx_max_pending = MAX_RX_AVAIL;
8333 ering->rx_mini_max_pending = 0;
8334 ering->rx_jumbo_max_pending = 0;
8335
8336 ering->rx_pending = bp->rx_ring_size;
8337 ering->rx_mini_pending = 0;
8338 ering->rx_jumbo_pending = 0;
8339
8340 ering->tx_max_pending = MAX_TX_AVAIL;
8341 ering->tx_pending = bp->tx_ring_size;
8342}
8343
8344static int bnx2x_set_ringparam(struct net_device *dev,
8345 struct ethtool_ringparam *ering)
8346{
8347 struct bnx2x *bp = netdev_priv(dev);
34f80b04 8348 int rc = 0;
a2fbb9ea
ET
8349
8350 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8351 (ering->tx_pending > MAX_TX_AVAIL) ||
8352 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8353 return -EINVAL;
8354
8355 bp->rx_ring_size = ering->rx_pending;
8356 bp->tx_ring_size = ering->tx_pending;
8357
34f80b04
EG
8358 if (netif_running(dev)) {
8359 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8360 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea
ET
8361 }
8362
34f80b04 8363 return rc;
a2fbb9ea
ET
8364}
8365
8366static void bnx2x_get_pauseparam(struct net_device *dev,
8367 struct ethtool_pauseparam *epause)
8368{
8369 struct bnx2x *bp = netdev_priv(dev);
8370
c0700f90 8371 epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
c18487ee
YR
8372 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8373
c0700f90
DM
8374 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
8375 BNX2X_FLOW_CTRL_RX);
8376 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
8377 BNX2X_FLOW_CTRL_TX);
a2fbb9ea
ET
8378
8379 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8380 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8381 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8382}
8383
8384static int bnx2x_set_pauseparam(struct net_device *dev,
8385 struct ethtool_pauseparam *epause)
8386{
8387 struct bnx2x *bp = netdev_priv(dev);
8388
34f80b04
EG
8389 if (IS_E1HMF(bp))
8390 return 0;
8391
a2fbb9ea
ET
8392 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8393 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8394 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8395
c0700f90 8396 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
a2fbb9ea 8397
f1410647 8398 if (epause->rx_pause)
c0700f90 8399 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
c18487ee 8400
f1410647 8401 if (epause->tx_pause)
c0700f90 8402 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
c18487ee 8403
c0700f90
DM
8404 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
8405 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 8406
c18487ee 8407 if (epause->autoneg) {
34f80b04 8408 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
3196a88a 8409 DP(NETIF_MSG_LINK, "autoneg not supported\n");
c18487ee
YR
8410 return -EINVAL;
8411 }
a2fbb9ea 8412
c18487ee 8413 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
c0700f90 8414 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
c18487ee 8415 }
a2fbb9ea 8416
c18487ee
YR
8417 DP(NETIF_MSG_LINK,
8418 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
34f80b04
EG
8419
8420 if (netif_running(dev)) {
bb2a0f7a 8421 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8422 bnx2x_link_set(bp);
8423 }
a2fbb9ea
ET
8424
8425 return 0;
8426}
8427
df0f2343
VZ
8428static int bnx2x_set_flags(struct net_device *dev, u32 data)
8429{
8430 struct bnx2x *bp = netdev_priv(dev);
8431 int changed = 0;
8432 int rc = 0;
8433
8434 /* TPA requires Rx CSUM offloading */
8435 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8436 if (!(dev->features & NETIF_F_LRO)) {
8437 dev->features |= NETIF_F_LRO;
8438 bp->flags |= TPA_ENABLE_FLAG;
8439 changed = 1;
8440 }
8441
8442 } else if (dev->features & NETIF_F_LRO) {
8443 dev->features &= ~NETIF_F_LRO;
8444 bp->flags &= ~TPA_ENABLE_FLAG;
8445 changed = 1;
8446 }
8447
8448 if (changed && netif_running(dev)) {
8449 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8450 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8451 }
8452
8453 return rc;
8454}
8455
a2fbb9ea
ET
8456static u32 bnx2x_get_rx_csum(struct net_device *dev)
8457{
8458 struct bnx2x *bp = netdev_priv(dev);
8459
8460 return bp->rx_csum;
8461}
8462
8463static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8464{
8465 struct bnx2x *bp = netdev_priv(dev);
df0f2343 8466 int rc = 0;
a2fbb9ea
ET
8467
8468 bp->rx_csum = data;
df0f2343
VZ
8469
 8470	/* Disable TPA when Rx CSUM is disabled; otherwise all
 8471	   TPA'ed packets will be discarded due to a wrong TCP CSUM */
8472 if (!data) {
8473 u32 flags = ethtool_op_get_flags(dev);
8474
8475 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8476 }
8477
8478 return rc;
a2fbb9ea
ET
8479}
8480
8481static int bnx2x_set_tso(struct net_device *dev, u32 data)
8482{
755735eb 8483 if (data) {
a2fbb9ea 8484 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
8485 dev->features |= NETIF_F_TSO6;
8486 } else {
a2fbb9ea 8487 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
8488 dev->features &= ~NETIF_F_TSO6;
8489 }
8490
a2fbb9ea
ET
8491 return 0;
8492}
8493
f3c87cdd 8494static const struct {
a2fbb9ea
ET
8495 char string[ETH_GSTRING_LEN];
8496} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
f3c87cdd
YG
8497 { "register_test (offline)" },
8498 { "memory_test (offline)" },
8499 { "loopback_test (offline)" },
8500 { "nvram_test (online)" },
8501 { "interrupt_test (online)" },
8502 { "link_test (online)" },
8503 { "idle check (online)" },
8504 { "MC errors (online)" }
a2fbb9ea
ET
8505};
8506
8507static int bnx2x_self_test_count(struct net_device *dev)
8508{
8509 return BNX2X_NUM_TESTS;
8510}
8511
f3c87cdd
YG
8512static int bnx2x_test_registers(struct bnx2x *bp)
8513{
8514 int idx, i, rc = -ENODEV;
8515 u32 wr_val = 0;
9dabc424 8516 int port = BP_PORT(bp);
f3c87cdd
YG
8517 static const struct {
8518 u32 offset0;
8519 u32 offset1;
8520 u32 mask;
8521 } reg_tbl[] = {
8522/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8523 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8524 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8525 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8526 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8527 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8528 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8529 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8530 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8531 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8532/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8533 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8534 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8535 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8536 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8537 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8538 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8539 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8540 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8541 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8542/* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8543 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8544 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8545 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8546 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8547 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
8548 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
8549 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
8550 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
8551 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
8552/* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
8553 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
8554 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
8555 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
8556 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8557 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
8558 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8559 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
8560
8561 { 0xffffffff, 0, 0x00000000 }
8562 };
8563
8564 if (!netif_running(bp->dev))
8565 return rc;
8566
8567 /* Repeat the test twice:
8568 First by writing 0x00000000, second by writing 0xffffffff */
8569 for (idx = 0; idx < 2; idx++) {
8570
8571 switch (idx) {
8572 case 0:
8573 wr_val = 0;
8574 break;
8575 case 1:
8576 wr_val = 0xffffffff;
8577 break;
8578 }
8579
8580 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8581 u32 offset, mask, save_val, val;
8582
8583 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8584 mask = reg_tbl[i].mask;
8585
8586 save_val = REG_RD(bp, offset);
8587
8588 REG_WR(bp, offset, wr_val);
8589 val = REG_RD(bp, offset);
8590
8591 /* Restore the original register's value */
8592 REG_WR(bp, offset, save_val);
8593
8594			/* verify that the value is as expected */
8595 if ((val & mask) != (wr_val & mask))
8596 goto test_reg_exit;
8597 }
8598 }
8599
8600 rc = 0;
8601
8602test_reg_exit:
8603 return rc;
8604}
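
/* Editor's note: a minimal, self-contained userspace sketch (not driver
 * code) of the walking-pattern register test above: write 0x00000000 and
 * then 0xffffffff to each register, read back, restore the saved value,
 * and compare only through the register's implemented-bits mask. The
 * fake_regs[] array and reg_rd()/reg_wr() helpers are hypothetical
 * stand-ins for REG_RD()/REG_WR(). */
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_regs[4];

static uint32_t reg_rd(uint32_t off) { return fake_regs[off / 4]; }
static void reg_wr(uint32_t off, uint32_t val) { fake_regs[off / 4] = val; }

static int test_registers(void)
{
	static const struct { uint32_t offset; uint32_t mask; } tbl[] = {
		{ 0x0, 0x000003ff }, { 0x4, 0xffffffff }, { 0x8, 0x00000001 },
	};
	static const uint32_t patterns[] = { 0x00000000, 0xffffffff };
	unsigned int idx, i;

	for (idx = 0; idx < 2; idx++) {
		for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++) {
			uint32_t save = reg_rd(tbl[i].offset);
			uint32_t val;

			reg_wr(tbl[i].offset, patterns[idx]);
			val = reg_rd(tbl[i].offset);
			reg_wr(tbl[i].offset, save);	/* restore */

			/* only the implemented bits must echo back */
			if ((val & tbl[i].mask) !=
			    (patterns[idx] & tbl[i].mask))
				return -1;
		}
	}
	return 0;
}

int main(void)
{
	printf("register test: %s\n", test_registers() ? "FAILED" : "OK");
	return 0;
}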
8605
8606static int bnx2x_test_memory(struct bnx2x *bp)
8607{
8608 int i, j, rc = -ENODEV;
8609 u32 val;
8610 static const struct {
8611 u32 offset;
8612 int size;
8613 } mem_tbl[] = {
8614 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
8615 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8616 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
8617 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
8618 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
8619 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
8620 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
8621
8622 { 0xffffffff, 0 }
8623 };
8624 static const struct {
8625 char *name;
8626 u32 offset;
8627 u32 e1_mask;
8628 u32 e1h_mask;
8629	} prty_tbl[] = {
8630 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
8631 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
8632 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
8633 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
8634 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
8635 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
8636
8637 { NULL, 0xffffffff, 0, 0 }
8638 };
8639
8640 if (!netif_running(bp->dev))
8641 return rc;
8642
8643 /* Go through all the memories */
8644 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8645 for (j = 0; j < mem_tbl[i].size; j++)
8646 REG_RD(bp, mem_tbl[i].offset + j*4);
8647
8648 /* Check the parity status */
8649 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8650 val = REG_RD(bp, prty_tbl[i].offset);
8651 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8652 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8653 DP(NETIF_MSG_HW,
8654 "%s is 0x%x\n", prty_tbl[i].name, val);
8655 goto test_mem_exit;
8656 }
8657 }
8658
8659 rc = 0;
8660
8661test_mem_exit:
8662 return rc;
8663}
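
/* Editor's note: a small illustration (values made up) of the parity
 * check above: bits listed in the per-chip mask column are tolerated,
 * anything outside that mask is a real parity error. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t prty_sts = 0x00040040;		/* pretend register readout */
	uint32_t tolerated = 0x3ffc0;		/* e.g. the e1_mask column */
	uint32_t unexpected = prty_sts & ~tolerated;

	if (unexpected)
		printf("parity error bits: 0x%08x\n", unexpected);
	else
		printf("parity status clean\n");
	return 0;
}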
8664
8665static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8666{
8667 int cnt = 1000;
8668
8669 if (link_up)
8670 while (bnx2x_link_test(bp) && cnt--)
8671 msleep(10);
8672}
8673
8674static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8675{
8676 unsigned int pkt_size, num_pkts, i;
8677 struct sk_buff *skb;
8678 unsigned char *packet;
8679 struct bnx2x_fastpath *fp = &bp->fp[0];
8680 u16 tx_start_idx, tx_idx;
8681 u16 rx_start_idx, rx_idx;
8682 u16 pkt_prod;
8683 struct sw_tx_bd *tx_buf;
8684 struct eth_tx_bd *tx_bd;
8685 dma_addr_t mapping;
8686 union eth_rx_cqe *cqe;
8687 u8 cqe_fp_flags;
8688 struct sw_rx_bd *rx_buf;
8689 u16 len;
8690 int rc = -ENODEV;
8691
8692 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8693 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8694		bnx2x_acquire_phy_lock(bp);
8695		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8696		bnx2x_release_phy_lock(bp);
8697
8698 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8699 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8700		bnx2x_acquire_phy_lock(bp);
8701		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8702		bnx2x_release_phy_lock(bp);
8703 /* wait until link state is restored */
8704 bnx2x_wait_for_link(bp, link_up);
8705
8706 } else
8707 return -EINVAL;
8708
8709 pkt_size = 1514;
8710 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8711 if (!skb) {
8712 rc = -ENOMEM;
8713 goto test_loopback_exit;
8714 }
8715 packet = skb_put(skb, pkt_size);
8716 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8717 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8718 for (i = ETH_HLEN; i < pkt_size; i++)
8719 packet[i] = (unsigned char) (i & 0xff);
8720
8721 num_pkts = 0;
8722 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8723 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8724
8725 pkt_prod = fp->tx_pkt_prod++;
8726 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8727 tx_buf->first_bd = fp->tx_bd_prod;
8728 tx_buf->skb = skb;
8729
8730 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8731 mapping = pci_map_single(bp->pdev, skb->data,
8732 skb_headlen(skb), PCI_DMA_TODEVICE);
8733 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8734 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8735 tx_bd->nbd = cpu_to_le16(1);
8736 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8737 tx_bd->vlan = cpu_to_le16(pkt_prod);
8738 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8739 ETH_TX_BD_FLAGS_END_BD);
8740 tx_bd->general_data = ((UNICAST_ADDRESS <<
8741 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8742
8743 wmb();
8744
8745 fp->hw_tx_prods->bds_prod =
8746 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8747 mb(); /* FW restriction: must not reorder writing nbd and packets */
8748 fp->hw_tx_prods->packets_prod =
8749 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8750 DOORBELL(bp, FP_IDX(fp), 0);
8751
8752 mmiowb();
8753
8754 num_pkts++;
8755 fp->tx_bd_prod++;
8756 bp->dev->trans_start = jiffies;
8757
8758 udelay(100);
8759
8760 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8761 if (tx_idx != tx_start_idx + num_pkts)
8762 goto test_loopback_exit;
8763
8764 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8765 if (rx_idx != rx_start_idx + num_pkts)
8766 goto test_loopback_exit;
8767
8768 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8769 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8770 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8771 goto test_loopback_rx_exit;
8772
8773 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8774 if (len != pkt_size)
8775 goto test_loopback_rx_exit;
8776
8777 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8778 skb = rx_buf->skb;
8779 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8780 for (i = ETH_HLEN; i < pkt_size; i++)
8781 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8782 goto test_loopback_rx_exit;
8783
8784 rc = 0;
8785
8786test_loopback_rx_exit:
8787
8788 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8789 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8790 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8791 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8792
8793 /* Update producers */
8794 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8795 fp->rx_sge_prod);
8796
8797test_loopback_exit:
8798 bp->link_params.loopback_mode = LOOPBACK_NONE;
8799
8800 return rc;
8801}
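
/* Editor's note: a hedged userspace sketch of the self-test frame that
 * bnx2x_run_loopback() builds above: destination MAC = own MAC, the rest
 * of the Ethernet header zeroed, and every payload byte set to (i & 0xff)
 * so the receive side can verify the data byte-for-byte. All names below
 * are illustrative, not driver code. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SK_ETH_ALEN 6
#define SK_ETH_HLEN 14

static void build_test_frame(uint8_t *pkt, size_t pkt_size,
			     const uint8_t *own_mac)
{
	size_t i;

	memcpy(pkt, own_mac, SK_ETH_ALEN);	/* dest MAC = our own MAC */
	memset(pkt + SK_ETH_ALEN, 0, SK_ETH_HLEN - SK_ETH_ALEN);
	for (i = SK_ETH_HLEN; i < pkt_size; i++)
		pkt[i] = (uint8_t)(i & 0xff);
}

static int verify_test_frame(const uint8_t *pkt, size_t pkt_size)
{
	size_t i;

	for (i = SK_ETH_HLEN; i < pkt_size; i++)
		if (pkt[i] != (uint8_t)(i & 0xff))
			return -1;	/* payload corrupted in loopback */
	return 0;
}

int main(void)
{
	static const uint8_t mac[SK_ETH_ALEN] = { 0, 0x10, 0x18, 0, 0, 1 };
	uint8_t pkt[1514];

	build_test_frame(pkt, sizeof(pkt), mac);
	printf("loopback payload check: %s\n",
	       verify_test_frame(pkt, sizeof(pkt)) ? "FAILED" : "OK");
	return 0;
}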
8802
8803static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8804{
8805 int rc = 0;
8806
8807 if (!netif_running(bp->dev))
8808 return BNX2X_LOOPBACK_FAILED;
8809
8810	bnx2x_netif_stop(bp, 1);
8811
8812 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8813 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8814 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8815 }
8816
8817 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8818 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8819 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8820 }
8821
8822 bnx2x_netif_start(bp);
8823
8824 return rc;
8825}
8826
8827#define CRC32_RESIDUAL 0xdebb20e3
8828
8829static int bnx2x_test_nvram(struct bnx2x *bp)
8830{
8831 static const struct {
8832 int offset;
8833 int size;
8834 } nvram_tbl[] = {
8835 { 0, 0x14 }, /* bootstrap */
8836 { 0x14, 0xec }, /* dir */
8837 { 0x100, 0x350 }, /* manuf_info */
8838 { 0x450, 0xf0 }, /* feature_info */
8839 { 0x640, 0x64 }, /* upgrade_key_info */
8840 { 0x6a4, 0x64 },
8841 { 0x708, 0x70 }, /* manuf_key_info */
8842 { 0x778, 0x70 },
8843 { 0, 0 }
8844 };
8845 u32 buf[0x350 / 4];
8846 u8 *data = (u8 *)buf;
8847 int i, rc;
8848 u32 magic, csum;
8849
8850 rc = bnx2x_nvram_read(bp, 0, data, 4);
8851 if (rc) {
8852 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8853 goto test_nvram_exit;
8854 }
8855
8856 magic = be32_to_cpu(buf[0]);
8857 if (magic != 0x669955aa) {
8858 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8859 rc = -ENODEV;
8860 goto test_nvram_exit;
8861 }
8862
8863 for (i = 0; nvram_tbl[i].size; i++) {
8864
8865 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8866 nvram_tbl[i].size);
8867 if (rc) {
8868 DP(NETIF_MSG_PROBE,
8869 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8870 goto test_nvram_exit;
8871 }
8872
8873 csum = ether_crc_le(nvram_tbl[i].size, data);
8874 if (csum != CRC32_RESIDUAL) {
8875 DP(NETIF_MSG_PROBE,
8876 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8877 rc = -ENODEV;
8878 goto test_nvram_exit;
8879 }
8880 }
8881
8882test_nvram_exit:
8883 return rc;
8884}
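
/* Editor's note: a standalone sketch of why bnx2x_test_nvram() can
 * validate every area against the single constant CRC32_RESIDUAL: an
 * ether_crc_le()-style CRC-32 (init ~0, reflected poly 0xedb88320, no
 * final inversion) computed over a block whose last four bytes are the
 * inverted little-endian CRC of the preceding data always lands on
 * 0xdebb20e3. The 16-byte sample block is arbitrary. */
#include <stdint.h>
#include <stdio.h>

static uint32_t crc32_le(const uint8_t *buf, size_t len)
{
	uint32_t crc = 0xffffffff;
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ (0xedb88320 & (0u - (crc & 1)));
	}
	return crc;	/* no final inversion, as in ether_crc_le() */
}

int main(void)
{
	uint8_t blk[20] = "bnx2x nvram area";	/* 16 data bytes + 4 CRC */
	uint32_t csum = ~crc32_le(blk, 16);	/* stored checksum is ~CRC */

	blk[16] = csum & 0xff;			/* append little-endian */
	blk[17] = (csum >> 8) & 0xff;
	blk[18] = (csum >> 16) & 0xff;
	blk[19] = (csum >> 24) & 0xff;

	printf("residual = 0x%08x (expect 0xdebb20e3)\n",
	       crc32_le(blk, sizeof(blk)));
	return 0;
}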
8885
8886static int bnx2x_test_intr(struct bnx2x *bp)
8887{
8888 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8889 int i, rc;
8890
8891 if (!netif_running(bp->dev))
8892 return -ENODEV;
8893
8894 config->hdr.length_6b = 0;
8895 config->hdr.offset = 0;
8896 config->hdr.client_id = BP_CL_ID(bp);
8897 config->hdr.reserved1 = 0;
8898
8899 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8900 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8901 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8902 if (rc == 0) {
8903 bp->set_mac_pending++;
8904 for (i = 0; i < 10; i++) {
8905 if (!bp->set_mac_pending)
8906 break;
8907 msleep_interruptible(10);
8908 }
8909 if (i == 10)
8910 rc = -ENODEV;
8911 }
8912
8913 return rc;
8914}
8915
8916static void bnx2x_self_test(struct net_device *dev,
8917 struct ethtool_test *etest, u64 *buf)
8918{
8919 struct bnx2x *bp = netdev_priv(dev);
8920
8921 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8922
8923	if (!netif_running(dev))
8924		return;
8925
8926	/* offline tests are not supported in MF mode */
8927 if (IS_E1HMF(bp))
8928 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8929
8930 if (etest->flags & ETH_TEST_FL_OFFLINE) {
8931 u8 link_up;
8932
8933 link_up = bp->link_vars.link_up;
8934 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8935 bnx2x_nic_load(bp, LOAD_DIAG);
8936 /* wait until link state is restored */
8937 bnx2x_wait_for_link(bp, link_up);
8938
8939 if (bnx2x_test_registers(bp) != 0) {
8940 buf[0] = 1;
8941 etest->flags |= ETH_TEST_FL_FAILED;
8942 }
8943 if (bnx2x_test_memory(bp) != 0) {
8944 buf[1] = 1;
8945 etest->flags |= ETH_TEST_FL_FAILED;
8946 }
8947 buf[2] = bnx2x_test_loopback(bp, link_up);
8948 if (buf[2] != 0)
8949 etest->flags |= ETH_TEST_FL_FAILED;
8950
8951 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8952 bnx2x_nic_load(bp, LOAD_NORMAL);
8953 /* wait until link state is restored */
8954 bnx2x_wait_for_link(bp, link_up);
8955 }
8956 if (bnx2x_test_nvram(bp) != 0) {
8957 buf[3] = 1;
8958 etest->flags |= ETH_TEST_FL_FAILED;
8959 }
8960 if (bnx2x_test_intr(bp) != 0) {
8961 buf[4] = 1;
8962 etest->flags |= ETH_TEST_FL_FAILED;
8963 }
8964 if (bp->port.pmf)
8965 if (bnx2x_link_test(bp) != 0) {
8966 buf[5] = 1;
8967 etest->flags |= ETH_TEST_FL_FAILED;
8968 }
8969 buf[7] = bnx2x_mc_assert(bp);
8970 if (buf[7] != 0)
8971 etest->flags |= ETH_TEST_FL_FAILED;
8972
8973#ifdef BNX2X_EXTRA_DEBUG
8974 bnx2x_panic_dump(bp);
8975#endif
8976}
8977
8978static const struct {
8979 long offset;
8980 int size;
8981 u32 flags;
8982#define STATS_FLAGS_PORT 1
8983#define STATS_FLAGS_FUNC 2
8984 u8 string[ETH_GSTRING_LEN];
8985} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
8986/* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
8987 8, STATS_FLAGS_FUNC, "rx_bytes" },
8988 { STATS_OFFSET32(error_bytes_received_hi),
8989 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
8990 { STATS_OFFSET32(total_bytes_transmitted_hi),
8991 8, STATS_FLAGS_FUNC, "tx_bytes" },
8992 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8993 8, STATS_FLAGS_PORT, "tx_error_bytes" },
8994	{ STATS_OFFSET32(total_unicast_packets_received_hi),
8995				8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
8996	{ STATS_OFFSET32(total_multicast_packets_received_hi),
8997				8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
8998	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
8999				8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
9000	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9001				8, STATS_FLAGS_FUNC, "tx_packets" },
9002	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9003				8, STATS_FLAGS_PORT, "tx_mac_errors" },
9004/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9005				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9006	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9007				8, STATS_FLAGS_PORT, "rx_crc_errors" },
9008	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9009				8, STATS_FLAGS_PORT, "rx_align_errors" },
9010	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9011				8, STATS_FLAGS_PORT, "tx_single_collisions" },
9012	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9013				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9014	{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9015				8, STATS_FLAGS_PORT, "tx_deferred" },
9016	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9017				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9018	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9019				8, STATS_FLAGS_PORT, "tx_late_collisions" },
9020	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9021				8, STATS_FLAGS_PORT, "tx_total_collisions" },
9022	{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9023 8, STATS_FLAGS_PORT, "rx_fragments" },
9024/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9025 8, STATS_FLAGS_PORT, "rx_jabbers" },
9026	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9027				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9028	{ STATS_OFFSET32(jabber_packets_received),
9029				4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
9030	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9031				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9032	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9033			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9034	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9035			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9036	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9037			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9038	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9039			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9040	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9041			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9042	{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
9043			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9044/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
9045				8, STATS_FLAGS_PORT, "rx_xon_frames" },
9046	{ STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
9047 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9048 { STATS_OFFSET32(tx_stat_outxonsent_hi),
9049 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9050 { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9051 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
9052	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9053 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9054 { STATS_OFFSET32(mac_filter_discard),
9055 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9056 { STATS_OFFSET32(no_buff_discard),
9057 4, STATS_FLAGS_FUNC, "rx_discards" },
9058 { STATS_OFFSET32(xxoverflow_discard),
9059 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9060 { STATS_OFFSET32(brb_drop_hi),
9061 8, STATS_FLAGS_PORT, "brb_discard" },
9062 { STATS_OFFSET32(brb_truncate_hi),
9063 8, STATS_FLAGS_PORT, "brb_truncate" },
9064/* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9065 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9066 { STATS_OFFSET32(rx_skb_alloc_failed),
9067 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9068/* 42 */{ STATS_OFFSET32(hw_csum_err),
9069 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
9070};
9071
9072#define IS_NOT_E1HMF_STAT(bp, i) \
9073 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
9074
9075static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9076{
9077 struct bnx2x *bp = netdev_priv(dev);
9078 int i, j;
9079
9080 switch (stringset) {
9081 case ETH_SS_STATS:
9082		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9083			if (IS_NOT_E1HMF_STAT(bp, i))
9084 continue;
9085 strcpy(buf + j*ETH_GSTRING_LEN,
9086 bnx2x_stats_arr[i].string);
9087 j++;
9088 }
9089 break;
9090
9091 case ETH_SS_TEST:
9092 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9093 break;
9094 }
9095}
9096
9097static int bnx2x_get_stats_count(struct net_device *dev)
9098{
9099 struct bnx2x *bp = netdev_priv(dev);
9100 int i, num_stats = 0;
9101
9102 for (i = 0; i < BNX2X_NUM_STATS; i++) {
9103		if (IS_NOT_E1HMF_STAT(bp, i))
9104 continue;
9105 num_stats++;
9106 }
9107 return num_stats;
9108}
9109
9110static void bnx2x_get_ethtool_stats(struct net_device *dev,
9111 struct ethtool_stats *stats, u64 *buf)
9112{
9113 struct bnx2x *bp = netdev_priv(dev);
9114 u32 *hw_stats = (u32 *)&bp->eth_stats;
9115 int i, j;
9116
9117	for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9118		if (IS_NOT_E1HMF_STAT(bp, i))
9119			continue;
9120
9121 if (bnx2x_stats_arr[i].size == 0) {
9122 /* skip this counter */
9123 buf[j] = 0;
9124 j++;
9125 continue;
9126 }
9127		if (bnx2x_stats_arr[i].size == 4) {
9128			/* 4-byte counter */
9129 buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9130 j++;
9131 continue;
9132 }
9133 /* 8-byte counter */
9134 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9135 *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9136 j++;
9137 }
9138}
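
/* Editor's note: the 8-byte counters above live in the stats block as two
 * adjacent 32-bit words, <name>_hi followed by <name>_lo, and HILO_U64()
 * glues them back together. A minimal sketch of that combining step,
 * assuming HILO_U64(hi, lo) is ((u64)(hi) << 32) + (lo) as in the driver
 * headers: */
#include <stdint.h>
#include <stdio.h>

#define HILO_U64(hi, lo) ((((uint64_t)(hi)) << 32) + (lo))

int main(void)
{
	/* fake stats block slice: rx_bytes stored as hi word then lo word */
	uint32_t hw_stats[2] = { 0x00000001, 0x23456789 };

	printf("rx_bytes = %llu\n",
	       (unsigned long long)HILO_U64(hw_stats[0], hw_stats[1]));
	/* prints 4886718345, i.e. 0x123456789 */
	return 0;
}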
9139
9140static int bnx2x_phys_id(struct net_device *dev, u32 data)
9141{
9142 struct bnx2x *bp = netdev_priv(dev);
9143	int port = BP_PORT(bp);
9144 int i;
9145
9146 if (!netif_running(dev))
9147 return 0;
9148
9149 if (!bp->port.pmf)
9150 return 0;
9151
9152 if (data == 0)
9153 data = 2;
9154
9155 for (i = 0; i < (data * 2); i++) {
9156		if ((i % 2) == 0)
9157			bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9158 bp->link_params.hw_led_mode,
9159 bp->link_params.chip_id);
9160 else
9161			bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9162 bp->link_params.hw_led_mode,
9163 bp->link_params.chip_id);
9164
9165 msleep_interruptible(500);
9166 if (signal_pending(current))
9167 break;
9168 }
9169
9170	if (bp->link_vars.link_up)
9171		bnx2x_set_led(bp, port, LED_MODE_OPER,
9172 bp->link_vars.line_speed,
9173 bp->link_params.hw_led_mode,
9174 bp->link_params.chip_id);
9175
9176 return 0;
9177}
9178
9179static struct ethtool_ops bnx2x_ethtool_ops = {
9180 .get_settings = bnx2x_get_settings,
9181 .set_settings = bnx2x_set_settings,
9182 .get_drvinfo = bnx2x_get_drvinfo,
9183 .get_wol = bnx2x_get_wol,
9184 .set_wol = bnx2x_set_wol,
9185 .get_msglevel = bnx2x_get_msglevel,
9186 .set_msglevel = bnx2x_set_msglevel,
9187 .nway_reset = bnx2x_nway_reset,
9188 .get_link = ethtool_op_get_link,
9189 .get_eeprom_len = bnx2x_get_eeprom_len,
9190 .get_eeprom = bnx2x_get_eeprom,
9191 .set_eeprom = bnx2x_set_eeprom,
9192 .get_coalesce = bnx2x_get_coalesce,
9193 .set_coalesce = bnx2x_set_coalesce,
9194 .get_ringparam = bnx2x_get_ringparam,
9195 .set_ringparam = bnx2x_set_ringparam,
9196 .get_pauseparam = bnx2x_get_pauseparam,
9197 .set_pauseparam = bnx2x_set_pauseparam,
9198 .get_rx_csum = bnx2x_get_rx_csum,
9199 .set_rx_csum = bnx2x_set_rx_csum,
9200 .get_tx_csum = ethtool_op_get_tx_csum,
9201	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
9202 .set_flags = bnx2x_set_flags,
9203 .get_flags = ethtool_op_get_flags,
9204 .get_sg = ethtool_op_get_sg,
9205 .set_sg = ethtool_op_set_sg,
9206 .get_tso = ethtool_op_get_tso,
9207 .set_tso = bnx2x_set_tso,
9208 .self_test_count = bnx2x_self_test_count,
9209 .self_test = bnx2x_self_test,
9210 .get_strings = bnx2x_get_strings,
9211 .phys_id = bnx2x_phys_id,
9212 .get_stats_count = bnx2x_get_stats_count,
9213	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
9214};
9215
9216/* end of ethtool_ops */
9217
9218/****************************************************************************
9219* General service functions
9220****************************************************************************/
9221
9222static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9223{
9224 u16 pmcsr;
9225
9226 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9227
9228 switch (state) {
9229 case PCI_D0:
9230		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9231 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9232 PCI_PM_CTRL_PME_STATUS));
9233
9234 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9235			/* delay required during transition out of D3hot */
9236			msleep(20);
9237		break;
9238
9239 case PCI_D3hot:
9240 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9241 pmcsr |= 3;
9242
9243 if (bp->wol)
9244 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9245
9246 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9247 pmcsr);
9248
9249 /* No more memory access after this point until
9250 * device is brought back to D0.
9251 */
9252 break;
9253
9254 default:
9255 return -EINVAL;
9256 }
9257 return 0;
9258}
9259
9260/*
9261 * net_device service functions
9262 */
9263
9264static int bnx2x_poll(struct napi_struct *napi, int budget)
9265{
9266 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9267 napi);
9268 struct bnx2x *bp = fp->bp;
9269 int work_done = 0;
9270	u16 rx_cons_sb;
9271
9272#ifdef BNX2X_STOP_ON_ERROR
9273 if (unlikely(bp->panic))
9274		goto poll_panic;
9275#endif
9276
9277 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9278 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9279 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9280
9281 bnx2x_update_fpsb_idx(fp);
9282
9283	if (BNX2X_HAS_TX_WORK(fp))
9284 bnx2x_tx_int(fp, budget);
9285
9286 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9287 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9288 rx_cons_sb++;
9289	if (BNX2X_HAS_RX_WORK(fp))
9290 work_done = bnx2x_rx_int(fp, budget);
9291
9292	rmb(); /* BNX2X_HAS_WORK() reads the status block */
9293 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9294 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9295 rx_cons_sb++;
9296
9297 /* must not complete if we consumed full budget */
9298	if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9299
9300#ifdef BNX2X_STOP_ON_ERROR
9301poll_panic:
9302#endif
9303		netif_rx_complete(napi);
9304
9305		bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9306			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9307		bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9308 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9309 }
9310 return work_done;
9311}
9312
9313
9314/* we split the first BD into headers and data BDs
9315 * to ease the pain of our fellow microcode engineers
9316 * we use one mapping for both BDs
9317 * So far this has only been observed to happen
9318 * in Other Operating Systems(TM)
9319 */
9320static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9321 struct bnx2x_fastpath *fp,
9322 struct eth_tx_bd **tx_bd, u16 hlen,
9323 u16 bd_prod, int nbd)
9324{
9325 struct eth_tx_bd *h_tx_bd = *tx_bd;
9326 struct eth_tx_bd *d_tx_bd;
9327 dma_addr_t mapping;
9328 int old_len = le16_to_cpu(h_tx_bd->nbytes);
9329
9330 /* first fix first BD */
9331 h_tx_bd->nbd = cpu_to_le16(nbd);
9332 h_tx_bd->nbytes = cpu_to_le16(hlen);
9333
9334 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9335 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9336 h_tx_bd->addr_lo, h_tx_bd->nbd);
9337
9338 /* now get a new data BD
9339 * (after the pbd) and fill it */
9340 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9341 d_tx_bd = &fp->tx_desc_ring[bd_prod];
9342
9343 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9344 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9345
9346 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9347 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9348 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9349 d_tx_bd->vlan = 0;
9350 /* this marks the BD as one that has no individual mapping
9351 * the FW ignores this flag in a BD not marked start
9352 */
9353 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9354 DP(NETIF_MSG_TX_QUEUED,
9355 "TSO split data size is %d (%x:%x)\n",
9356 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9357
9358 /* update tx_bd for marking the last BD flag */
9359 *tx_bd = d_tx_bd;
9360
9361 return bd_prod;
9362}
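
/* Editor's note: a hypothetical sketch of the address arithmetic that
 * bnx2x_tx_split() performs above. One DMA mapping covers the whole
 * linear buffer; the header BD keeps the first hlen bytes, and the data
 * BD reuses the same mapping at offset hlen with the remaining bytes, so
 * no second pci_map_single() is needed. */
#include <stdint.h>
#include <stdio.h>

struct simple_bd {
	uint64_t addr;
	uint16_t nbytes;
};

static void split_bd(struct simple_bd *h_bd, struct simple_bd *d_bd,
		     uint16_t hlen)
{
	uint16_t old_len = h_bd->nbytes;

	h_bd->nbytes = hlen;		/* header BD: first hlen bytes */
	d_bd->addr = h_bd->addr + hlen;	/* data BD: same mapping, offset */
	d_bd->nbytes = old_len - hlen;	/* data BD: the rest */
}

int main(void)
{
	struct simple_bd h = { .addr = 0x1000, .nbytes = 1514 }, d;

	split_bd(&h, &d, 54);	/* e.g. ETH + IP + TCP header bytes */
	printf("hdr: addr 0x%llx len %u; data: addr 0x%llx len %u\n",
	       (unsigned long long)h.addr, h.nbytes,
	       (unsigned long long)d.addr, d.nbytes);
	return 0;
}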
9363
9364static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9365{
9366 if (fix > 0)
9367 csum = (u16) ~csum_fold(csum_sub(csum,
9368 csum_partial(t_header - fix, fix, 0)));
9369
9370 else if (fix < 0)
9371 csum = (u16) ~csum_fold(csum_add(csum,
9372 csum_partial(t_header, -fix, 0)));
9373
9374 return swab16(csum);
9375}
9376
9377static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9378{
9379 u32 rc;
9380
9381 if (skb->ip_summed != CHECKSUM_PARTIAL)
9382 rc = XMIT_PLAIN;
9383
9384 else {
9385 if (skb->protocol == ntohs(ETH_P_IPV6)) {
9386 rc = XMIT_CSUM_V6;
9387 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9388 rc |= XMIT_CSUM_TCP;
9389
9390 } else {
9391 rc = XMIT_CSUM_V4;
9392 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9393 rc |= XMIT_CSUM_TCP;
9394 }
9395 }
9396
9397 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9398 rc |= XMIT_GSO_V4;
9399
9400 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9401 rc |= XMIT_GSO_V6;
9402
9403 return rc;
9404}
9405
9406/* check if packet requires linearization (packet is too fragmented) */
9407static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9408 u32 xmit_type)
9409{
9410 int to_copy = 0;
9411 int hlen = 0;
9412 int first_bd_sz = 0;
9413
9414 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9415 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9416
9417 if (xmit_type & XMIT_GSO) {
9418 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9419 /* Check if LSO packet needs to be copied:
9420 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9421 int wnd_size = MAX_FETCH_BD - 3;
9422			/* Number of windows to check */
9423 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9424 int wnd_idx = 0;
9425 int frag_idx = 0;
9426 u32 wnd_sum = 0;
9427
9428 /* Headers length */
9429 hlen = (int)(skb_transport_header(skb) - skb->data) +
9430 tcp_hdrlen(skb);
9431
9432 /* Amount of data (w/o headers) on linear part of SKB*/
9433 first_bd_sz = skb_headlen(skb) - hlen;
9434
9435 wnd_sum = first_bd_sz;
9436
9437 /* Calculate the first sum - it's special */
9438 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9439 wnd_sum +=
9440 skb_shinfo(skb)->frags[frag_idx].size;
9441
9442 /* If there was data on linear skb data - check it */
9443 if (first_bd_sz > 0) {
9444 if (unlikely(wnd_sum < lso_mss)) {
9445 to_copy = 1;
9446 goto exit_lbl;
9447 }
9448
9449 wnd_sum -= first_bd_sz;
9450 }
9451
9452 /* Others are easier: run through the frag list and
9453 check all windows */
9454 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9455 wnd_sum +=
9456 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9457
9458 if (unlikely(wnd_sum < lso_mss)) {
9459 to_copy = 1;
9460 break;
9461 }
9462 wnd_sum -=
9463 skb_shinfo(skb)->frags[wnd_idx].size;
9464 }
9465
9466 } else {
9467			/* a non-LSO packet that is too fragmented should
9468			   always be linearized */
9469 to_copy = 1;
9470 }
9471 }
9472
9473exit_lbl:
9474 if (unlikely(to_copy))
9475 DP(NETIF_MSG_TX_QUEUED,
9476 "Linearization IS REQUIRED for %s packet. "
9477 "num_frags %d hlen %d first_bd_sz %d\n",
9478 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9479 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9480
9481 return to_copy;
9482}
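
/* Editor's note: a standalone rendering of the sliding-window check
 * above, with made-up sizes. The FW fetches at most MAX_FETCH_BD BDs per
 * pass, so every window of wnd_size consecutive fragments (seeded with
 * the linear part) must carry at least one MSS worth of data; any window
 * that falls short forces linearization of the skb. */
#include <stdio.h>

static int needs_linearize(const int *frag, int nfrags, int first_bd_sz,
			   int wnd_size, int mss)
{
	int wnd_sum = first_bd_sz;
	int i;

	/* seed the window: linear data plus the first wnd_size-1 frags */
	for (i = 0; i < wnd_size - 1 && i < nfrags; i++)
		wnd_sum += frag[i];
	if (wnd_sum < mss)
		return 1;
	wnd_sum -= first_bd_sz;

	/* slide: pull in the next fragment, drop the oldest one */
	for (i = 0; i + wnd_size - 1 < nfrags; i++) {
		wnd_sum += frag[i + wnd_size - 1];
		if (wnd_sum < mss)
			return 1;
		wnd_sum -= frag[i];
	}
	return 0;
}

int main(void)
{
	int frags[] = { 200, 200, 100, 100, 100, 4000 };

	printf("linearize: %s\n",
	       needs_linearize(frags, 6, 1000, 3, 1460) ? "yes" : "no");
	return 0;
}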
9483
9484/* called with netif_tx_lock
9485 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9486 * netif_wake_queue()
9487 */
9488static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9489{
9490 struct bnx2x *bp = netdev_priv(dev);
9491 struct bnx2x_fastpath *fp;
9492 struct sw_tx_bd *tx_buf;
9493 struct eth_tx_bd *tx_bd;
9494 struct eth_tx_parse_bd *pbd = NULL;
9495 u16 pkt_prod, bd_prod;
9496	int nbd, fp_index;
9497	dma_addr_t mapping;
9498 u32 xmit_type = bnx2x_xmit_type(bp, skb);
9499 int vlan_off = (bp->e1hov ? 4 : 0);
9500 int i;
9501 u8 hlen = 0;
9502
9503#ifdef BNX2X_STOP_ON_ERROR
9504 if (unlikely(bp->panic))
9505 return NETDEV_TX_BUSY;
9506#endif
9507
9508	fp_index = (smp_processor_id() % bp->num_queues);
9509	fp = &bp->fp[fp_index];
9510
9511	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
9512		bp->eth_stats.driver_xoff++;
9513 netif_stop_queue(dev);
9514 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9515 return NETDEV_TX_BUSY;
9516 }
9517
9518 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
9519 " gso type %x xmit_type %x\n",
9520 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9521 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9522
9523	/* First, check if we need to linearize the skb
9524 (due to FW restrictions) */
9525 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9526 /* Statistics of linearization */
9527 bp->lin_cnt++;
9528 if (skb_linearize(skb) != 0) {
9529 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9530 "silently dropping this SKB\n");
9531 dev_kfree_skb_any(skb);
9532			return NETDEV_TX_OK;
9533 }
9534 }
9535
9536	/*
9537	Please read carefully. First we use one BD which we mark as start,
9538	then for TSO or xsum we have a parsing info BD,
9539	and only then we have the rest of the TSO BDs.
9540	(don't forget to mark the last one as last,
9541	and to unmap only AFTER you write to the BD ...)
9542	And above all, all pbd sizes are in words - NOT DWORDS!
9543 */
9544
9545 pkt_prod = fp->tx_pkt_prod++;
9546	bd_prod = TX_BD(fp->tx_bd_prod);
9547
9548	/* get a tx_buf and first BD */
9549 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9550 tx_bd = &fp->tx_desc_ring[bd_prod];
9551
9552 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9553 tx_bd->general_data = (UNICAST_ADDRESS <<
9554 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9555 /* header nbd */
9556 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
9557
9558 /* remember the first BD of the packet */
9559 tx_buf->first_bd = fp->tx_bd_prod;
9560 tx_buf->skb = skb;
9561
9562 DP(NETIF_MSG_TX_QUEUED,
9563 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9564 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9565
9566 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
9567 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9568 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9569 vlan_off += 4;
9570 } else
9571 tx_bd->vlan = cpu_to_le16(pkt_prod);
9572
9573	if (xmit_type) {
9574		/* turn on parsing and get a BD */
9575 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9576 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9577
9578 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9579 }
9580
9581 if (xmit_type & XMIT_CSUM) {
9582 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9583
9584 /* for now NS flag is not used in Linux */
9585		pbd->global_data = (hlen |
9586				    ((skb->protocol == ntohs(ETH_P_8021Q)) <<
9587				     ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9588
9589 pbd->ip_hlen = (skb_transport_header(skb) -
9590 skb_network_header(skb)) / 2;
9591
9592 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9593
9594 pbd->total_hlen = cpu_to_le16(hlen);
9595 hlen = hlen*2 - vlan_off;
9596
9597 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9598
9599 if (xmit_type & XMIT_CSUM_V4)
9600			tx_bd->bd_flags.as_bitfield |=
9601 ETH_TX_BD_FLAGS_IP_CSUM;
9602 else
9603 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9604
9605 if (xmit_type & XMIT_CSUM_TCP) {
9606 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9607
9608 } else {
9609 s8 fix = SKB_CS_OFF(skb); /* signed! */
9610
9611			pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9612			pbd->cs_offset = fix / 2;
9613
9614 DP(NETIF_MSG_TX_QUEUED,
9615 "hlen %d offset %d fix %d csum before fix %x\n",
9616 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9617 SKB_CS(skb));
9618
9619 /* HW bug: fixup the CSUM */
9620 pbd->tcp_pseudo_csum =
9621 bnx2x_csum_fix(skb_transport_header(skb),
9622 SKB_CS(skb), fix);
9623
9624 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9625 pbd->tcp_pseudo_csum);
9626 }
9627 }
9628
9629 mapping = pci_map_single(bp->pdev, skb->data,
9630				 skb_headlen(skb), PCI_DMA_TODEVICE);
9631
9632 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9633 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9634	nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9635 tx_bd->nbd = cpu_to_le16(nbd);
9636 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9637
9638 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
9639 " nbytes %d flags %x vlan %x\n",
9640 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9641 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9642 le16_to_cpu(tx_bd->vlan));
9643
9644	if (xmit_type & XMIT_GSO) {
9645
9646 DP(NETIF_MSG_TX_QUEUED,
9647 "TSO packet len %d hlen %d total len %d tso size %d\n",
9648 skb->len, hlen, skb_headlen(skb),
9649 skb_shinfo(skb)->gso_size);
9650
9651 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9652
9653 if (unlikely(skb_headlen(skb) > hlen))
9654 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9655 bd_prod, ++nbd);
9656
9657 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9658 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9659 pbd->tcp_flags = pbd_tcp_flags(skb);
9660
9661 if (xmit_type & XMIT_GSO_V4) {
9662 pbd->ip_id = swab16(ip_hdr(skb)->id);
9663 pbd->tcp_pseudo_csum =
9664 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9665 ip_hdr(skb)->daddr,
9666 0, IPPROTO_TCP, 0));
9667
9668 } else
9669 pbd->tcp_pseudo_csum =
9670 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9671 &ipv6_hdr(skb)->daddr,
9672 0, IPPROTO_TCP, 0));
9673
9674 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9675 }
9676
9677 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9678 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9679
9680 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9681 tx_bd = &fp->tx_desc_ring[bd_prod];
9682
9683 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9684 frag->size, PCI_DMA_TODEVICE);
9685
9686 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9687 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9688 tx_bd->nbytes = cpu_to_le16(frag->size);
9689 tx_bd->vlan = cpu_to_le16(pkt_prod);
9690 tx_bd->bd_flags.as_bitfield = 0;
9691
9692 DP(NETIF_MSG_TX_QUEUED,
9693 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
9694 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9695 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9696 }
9697
9698	/* now at last mark the BD as the last BD */
9699 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9700
9701 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9702 tx_bd, tx_bd->bd_flags.as_bitfield);
9703
9704 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9705
9706	/* now send a tx doorbell, counting the next BD
9707 * if the packet contains or ends with it
9708 */
9709 if (TX_BD_POFF(bd_prod) < nbd)
9710 nbd++;
9711
9712 if (pbd)
9713 DP(NETIF_MSG_TX_QUEUED,
9714 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9715 " tcp_flags %x xsum %x seq %u hlen %u\n",
9716 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9717 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9718		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
9719
9720	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
9721
9722 /*
9723 * Make sure that the BD data is updated before updating the producer
9724 * since FW might read the BD right after the producer is updated.
9725 * This is only applicable for weak-ordered memory model archs such
9726	 * as IA-64. The following barrier is also mandatory since the FW
9727	 * assumes packets must have BDs.
9728 */
9729 wmb();
9730
9731 fp->hw_tx_prods->bds_prod =
9732 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9733	mb(); /* FW restriction: must not reorder writing nbd and packets */
9734 fp->hw_tx_prods->packets_prod =
9735 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9736	DOORBELL(bp, FP_IDX(fp), 0);
9737
9738 mmiowb();
9739
9740	fp->tx_bd_prod += nbd;
9741 dev->trans_start = jiffies;
9742
9743 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9744 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
9745 if we put Tx into XOFF state. */
9746 smp_mb();
9747		netif_stop_queue(dev);
9748		bp->eth_stats.driver_xoff++;
9749 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9750 netif_wake_queue(dev);
9751 }
9752 fp->tx_pkt++;
9753
9754 return NETDEV_TX_OK;
9755}
9756
9757/* called with rtnl_lock */
9758static int bnx2x_open(struct net_device *dev)
9759{
9760 struct bnx2x *bp = netdev_priv(dev);
9761
9762 bnx2x_set_power_state(bp, PCI_D0);
9763
9764	return bnx2x_nic_load(bp, LOAD_OPEN);
9765}
9766
9767/* called with rtnl_lock */
9768static int bnx2x_close(struct net_device *dev)
9769{
9770 struct bnx2x *bp = netdev_priv(dev);
9771
9772 /* Unload the driver, release IRQs */
9773 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9774 if (atomic_read(&bp->pdev->enable_cnt) == 1)
9775 if (!CHIP_REV_IS_SLOW(bp))
9776 bnx2x_set_power_state(bp, PCI_D3hot);
9777
9778 return 0;
9779}
9780
9781/* called with netif_tx_lock from set_multicast */
9782static void bnx2x_set_rx_mode(struct net_device *dev)
9783{
9784 struct bnx2x *bp = netdev_priv(dev);
9785 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9786 int port = BP_PORT(bp);
9787
9788 if (bp->state != BNX2X_STATE_OPEN) {
9789 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9790 return;
9791 }
9792
9793 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9794
9795 if (dev->flags & IFF_PROMISC)
9796 rx_mode = BNX2X_RX_MODE_PROMISC;
9797
9798 else if ((dev->flags & IFF_ALLMULTI) ||
9799 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9800 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9801
9802 else { /* some multicasts */
9803 if (CHIP_IS_E1(bp)) {
9804 int i, old, offset;
9805 struct dev_mc_list *mclist;
9806 struct mac_configuration_cmd *config =
9807 bnx2x_sp(bp, mcast_config);
9808
9809 for (i = 0, mclist = dev->mc_list;
9810 mclist && (i < dev->mc_count);
9811 i++, mclist = mclist->next) {
9812
9813 config->config_table[i].
9814 cam_entry.msb_mac_addr =
9815 swab16(*(u16 *)&mclist->dmi_addr[0]);
9816 config->config_table[i].
9817 cam_entry.middle_mac_addr =
9818 swab16(*(u16 *)&mclist->dmi_addr[2]);
9819 config->config_table[i].
9820 cam_entry.lsb_mac_addr =
9821 swab16(*(u16 *)&mclist->dmi_addr[4]);
9822 config->config_table[i].cam_entry.flags =
9823 cpu_to_le16(port);
9824 config->config_table[i].
9825 target_table_entry.flags = 0;
9826 config->config_table[i].
9827 target_table_entry.client_id = 0;
9828 config->config_table[i].
9829 target_table_entry.vlan_id = 0;
9830
9831 DP(NETIF_MSG_IFUP,
9832 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9833 config->config_table[i].
9834 cam_entry.msb_mac_addr,
9835 config->config_table[i].
9836 cam_entry.middle_mac_addr,
9837 config->config_table[i].
9838 cam_entry.lsb_mac_addr);
9839 }
9840 old = config->hdr.length_6b;
9841 if (old > i) {
9842 for (; i < old; i++) {
9843 if (CAM_IS_INVALID(config->
9844 config_table[i])) {
9845 i--; /* already invalidated */
9846 break;
9847 }
9848 /* invalidate */
9849 CAM_INVALIDATE(config->
9850 config_table[i]);
9851 }
9852 }
9853
9854 if (CHIP_REV_IS_SLOW(bp))
9855 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9856 else
9857 offset = BNX2X_MAX_MULTICAST*(1 + port);
9858
9859 config->hdr.length_6b = i;
9860 config->hdr.offset = offset;
9861 config->hdr.client_id = BP_CL_ID(bp);
9862 config->hdr.reserved1 = 0;
9863
9864 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9865 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9866 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9867 0);
9868 } else { /* E1H */
9869 /* Accept one or more multicasts */
9870 struct dev_mc_list *mclist;
9871 u32 mc_filter[MC_HASH_SIZE];
9872 u32 crc, bit, regidx;
9873 int i;
9874
9875 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9876
9877 for (i = 0, mclist = dev->mc_list;
9878 mclist && (i < dev->mc_count);
9879 i++, mclist = mclist->next) {
9880
9881 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
9882 mclist->dmi_addr);
9883
9884 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9885 bit = (crc >> 24) & 0xff;
9886 regidx = bit >> 5;
9887 bit &= 0x1f;
9888 mc_filter[regidx] |= (1 << bit);
9889 }
9890
9891 for (i = 0; i < MC_HASH_SIZE; i++)
9892 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9893 mc_filter[i]);
9894 }
9895 }
9896
9897 bp->rx_mode = rx_mode;
9898 bnx2x_set_storm_rx_mode(bp);
9899}
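
/* Editor's note: userspace sketch of the E1H multicast filter math in
 * bnx2x_set_rx_mode() above. The top byte of a CRC32C over the MAC picks
 * one of 256 filter bits held in MC_HASH_SIZE (8) 32-bit registers: bits
 * 7..5 of that byte select the register, bits 4..0 the bit inside it.
 * The bitwise crc32c_le() below is a plain reflected-polynomial
 * (0x82f63b78) CRC seeded with 0, assuming that matches the kernel's
 * crc32c_le(0, addr, ETH_ALEN) as called by the driver. */
#include <stdint.h>
#include <stdio.h>

#define SK_MC_HASH_SIZE 8

static uint32_t crc32c_le(uint32_t crc, const uint8_t *data, size_t len)
{
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		crc ^= data[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ (0x82f63b78 & (0u - (crc & 1)));
	}
	return crc;
}

int main(void)
{
	uint32_t mc_filter[SK_MC_HASH_SIZE] = { 0 };
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t crc = crc32c_le(0, mac, 6);
	uint32_t bit = (crc >> 24) & 0xff;
	uint32_t regidx = bit >> 5;	/* which of the 8 registers */

	bit &= 0x1f;			/* which bit inside that register */
	mc_filter[regidx] |= 1u << bit;
	printf("crc 0x%08x -> MC_HASH reg %u, bit %u\n", crc, regidx, bit);
	return 0;
}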
9900
9901/* called with rtnl_lock */
9902static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9903{
9904 struct sockaddr *addr = p;
9905 struct bnx2x *bp = netdev_priv(dev);
9906
9907	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9908 return -EINVAL;
9909
9910 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9911 if (netif_running(dev)) {
9912 if (CHIP_IS_E1(bp))
9913			bnx2x_set_mac_addr_e1(bp, 1);
9914		else
9915			bnx2x_set_mac_addr_e1h(bp, 1);
9916	}
9917
9918 return 0;
9919}
9920
9921/* called with rtnl_lock */
9922static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9923{
9924 struct mii_ioctl_data *data = if_mii(ifr);
9925 struct bnx2x *bp = netdev_priv(dev);
9926	int port = BP_PORT(bp);
9927 int err;
9928
9929 switch (cmd) {
9930 case SIOCGMIIPHY:
9931		data->phy_id = bp->port.phy_addr;
9932
9933		/* fallthrough */
9934
9935	case SIOCGMIIREG: {
9936		u16 mii_regval;
9937
9938 if (!netif_running(dev))
9939 return -EAGAIN;
9940
9941		mutex_lock(&bp->port.phy_mutex);
9942		err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
9943 DEFAULT_PHY_DEV_ADDR,
9944 (data->reg_num & 0x1f), &mii_regval);
9945 data->val_out = mii_regval;
9946		mutex_unlock(&bp->port.phy_mutex);
9947 return err;
9948 }
9949
9950 case SIOCSMIIREG:
9951 if (!capable(CAP_NET_ADMIN))
9952 return -EPERM;
9953
9954 if (!netif_running(dev))
9955 return -EAGAIN;
9956
9957		mutex_lock(&bp->port.phy_mutex);
9958		err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
9959 DEFAULT_PHY_DEV_ADDR,
9960 (data->reg_num & 0x1f), data->val_in);
9961		mutex_unlock(&bp->port.phy_mutex);
9962 return err;
9963
9964 default:
9965 /* do nothing */
9966 break;
9967 }
9968
9969 return -EOPNOTSUPP;
9970}
9971
9972/* called with rtnl_lock */
9973static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9974{
9975 struct bnx2x *bp = netdev_priv(dev);
9976	int rc = 0;
9977
9978 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9979 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9980 return -EINVAL;
9981
9982 /* This does not race with packet allocation
9983	 * because the actual alloc size is
9984 * only updated as part of load
9985 */
9986 dev->mtu = new_mtu;
9987
9988 if (netif_running(dev)) {
9989 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9990 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9991	}
9992
9993 return rc;
9994}
9995
9996static void bnx2x_tx_timeout(struct net_device *dev)
9997{
9998 struct bnx2x *bp = netdev_priv(dev);
9999
10000#ifdef BNX2X_STOP_ON_ERROR
10001 if (!bp->panic)
10002 bnx2x_panic();
10003#endif
10004 /* This allows the netif to be shutdown gracefully before resetting */
10005 schedule_work(&bp->reset_task);
10006}
10007
10008#ifdef BCM_VLAN
10009/* called with rtnl_lock */
10010static void bnx2x_vlan_rx_register(struct net_device *dev,
10011 struct vlan_group *vlgrp)
10012{
10013 struct bnx2x *bp = netdev_priv(dev);
10014
10015 bp->vlgrp = vlgrp;
10016 if (netif_running(dev))
10017		bnx2x_set_client_config(bp);
10018}
10019
10020#endif
10021
10022#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10023static void poll_bnx2x(struct net_device *dev)
10024{
10025 struct bnx2x *bp = netdev_priv(dev);
10026
10027 disable_irq(bp->pdev->irq);
10028 bnx2x_interrupt(bp->pdev->irq, dev);
10029 enable_irq(bp->pdev->irq);
10030}
10031#endif
10032
10033static const struct net_device_ops bnx2x_netdev_ops = {
10034 .ndo_open = bnx2x_open,
10035 .ndo_stop = bnx2x_close,
10036 .ndo_start_xmit = bnx2x_start_xmit,
10037 .ndo_set_multicast_list = bnx2x_set_rx_mode,
10038 .ndo_set_mac_address = bnx2x_change_mac_addr,
10039 .ndo_validate_addr = eth_validate_addr,
10040 .ndo_do_ioctl = bnx2x_ioctl,
10041 .ndo_change_mtu = bnx2x_change_mtu,
10042 .ndo_tx_timeout = bnx2x_tx_timeout,
10043#ifdef BCM_VLAN
10044 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
10045#endif
10046#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10047 .ndo_poll_controller = poll_bnx2x,
10048#endif
10049};
10050
10051
10052static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10053 struct net_device *dev)
10054{
10055 struct bnx2x *bp;
10056 int rc;
10057
10058 SET_NETDEV_DEV(dev, &pdev->dev);
10059 bp = netdev_priv(dev);
10060
10061 bp->dev = dev;
10062 bp->pdev = pdev;
10063	bp->flags = 0;
10064	bp->func = PCI_FUNC(pdev->devfn);
10065
10066 rc = pci_enable_device(pdev);
10067 if (rc) {
10068 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10069 goto err_out;
10070 }
10071
10072 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10073 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10074 " aborting\n");
10075 rc = -ENODEV;
10076 goto err_out_disable;
10077 }
10078
10079 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10080 printk(KERN_ERR PFX "Cannot find second PCI device"
10081 " base address, aborting\n");
10082 rc = -ENODEV;
10083 goto err_out_disable;
10084 }
10085
10086 if (atomic_read(&pdev->enable_cnt) == 1) {
10087 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10088 if (rc) {
10089 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10090 " aborting\n");
10091 goto err_out_disable;
10092 }
10093
10094 pci_set_master(pdev);
10095 pci_save_state(pdev);
10096 }
10097
10098 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10099 if (bp->pm_cap == 0) {
10100 printk(KERN_ERR PFX "Cannot find power management"
10101 " capability, aborting\n");
10102 rc = -EIO;
10103 goto err_out_release;
10104 }
10105
10106 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10107 if (bp->pcie_cap == 0) {
10108 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10109 " aborting\n");
10110 rc = -EIO;
10111 goto err_out_release;
10112 }
10113
10114 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10115 bp->flags |= USING_DAC_FLAG;
10116 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10117 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10118 " failed, aborting\n");
10119 rc = -EIO;
10120 goto err_out_release;
10121 }
10122
10123 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10124 printk(KERN_ERR PFX "System does not support DMA,"
10125 " aborting\n");
10126 rc = -EIO;
10127 goto err_out_release;
10128 }
10129
10130 dev->mem_start = pci_resource_start(pdev, 0);
10131 dev->base_addr = dev->mem_start;
10132 dev->mem_end = pci_resource_end(pdev, 0);
10133
10134 dev->irq = pdev->irq;
10135
10136	bp->regview = pci_ioremap_bar(pdev, 0);
10137 if (!bp->regview) {
10138 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10139 rc = -ENOMEM;
10140 goto err_out_release;
10141 }
10142
10143 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10144 min_t(u64, BNX2X_DB_SIZE,
10145 pci_resource_len(pdev, 2)));
10146 if (!bp->doorbells) {
10147 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10148 rc = -ENOMEM;
10149 goto err_out_unmap;
10150 }
10151
10152 bnx2x_set_power_state(bp, PCI_D0);
10153
10154 /* clean indirect addresses */
10155 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10156 PCICFG_VENDOR_ID_OFFSET);
10157 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10158 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10159 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10160 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10161
10162	dev->watchdog_timeo = TX_TIMEOUT;
10163
10164	dev->netdev_ops = &bnx2x_netdev_ops;
10165	dev->ethtool_ops = &bnx2x_ethtool_ops;
10166 dev->features |= NETIF_F_SG;
10167 dev->features |= NETIF_F_HW_CSUM;
10168 if (bp->flags & USING_DAC_FLAG)
10169 dev->features |= NETIF_F_HIGHDMA;
10170#ifdef BCM_VLAN
10171 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10172#endif
10173 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10174	dev->features |= NETIF_F_TSO6;
10175
10176 return 0;
10177
10178err_out_unmap:
10179 if (bp->regview) {
10180 iounmap(bp->regview);
10181 bp->regview = NULL;
10182 }
10183 if (bp->doorbells) {
10184 iounmap(bp->doorbells);
10185 bp->doorbells = NULL;
10186 }
10187
10188err_out_release:
10189 if (atomic_read(&pdev->enable_cnt) == 1)
10190 pci_release_regions(pdev);
10191
10192err_out_disable:
10193 pci_disable_device(pdev);
10194 pci_set_drvdata(pdev, NULL);
10195
10196err_out:
10197 return rc;
10198}
10199
10200static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10201{
10202 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10203
10204 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10205 return val;
10206}
10207
10208/* return value of 1=2.5GHz 2=5GHz */
10209static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10210{
10211 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10212
10213 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10214 return val;
10215}
10216
10217static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10218 const struct pci_device_id *ent)
10219{
10220 static int version_printed;
10221 struct net_device *dev = NULL;
10222 struct bnx2x *bp;
10223	int rc;
10224
10225 if (version_printed++ == 0)
10226 printk(KERN_INFO "%s", version);
10227
10228 /* dev zeroed in init_etherdev */
10229 dev = alloc_etherdev(sizeof(*bp));
10230 if (!dev) {
10231 printk(KERN_ERR PFX "Cannot allocate net device\n");
10232		return -ENOMEM;
10233	}
10234
10235 bp = netdev_priv(dev);
10236 bp->msglevel = debug;
10237
10238	rc = bnx2x_init_dev(pdev, dev);
10239 if (rc < 0) {
10240 free_netdev(dev);
10241 return rc;
10242 }
10243
10244 rc = register_netdev(dev);
10245 if (rc) {
10246		dev_err(&pdev->dev, "Cannot register net device\n");
10247		goto init_one_exit;
10248 }
10249
10250 pci_set_drvdata(pdev, dev);
10251
10252 rc = bnx2x_init_bp(bp);
10253 if (rc) {
10254 unregister_netdev(dev);
10255 goto init_one_exit;
10256 }
10257
10258 netif_carrier_off(dev);
10259
10260	bp->common.name = board_info[ent->driver_data].name;
10261	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10262 " IRQ %d, ", dev->name, bp->common.name,
10263 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10264 bnx2x_get_pcie_width(bp),
10265 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10266 dev->base_addr, bp->pdev->irq);
10267	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
10268	return 0;
10269
10270init_one_exit:
10271 if (bp->regview)
10272 iounmap(bp->regview);
10273
10274 if (bp->doorbells)
10275 iounmap(bp->doorbells);
10276
10277 free_netdev(dev);
10278
10279 if (atomic_read(&pdev->enable_cnt) == 1)
10280 pci_release_regions(pdev);
10281
10282 pci_disable_device(pdev);
10283 pci_set_drvdata(pdev, NULL);
10284
10285 return rc;
10286}
10287
10288static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10289{
10290 struct net_device *dev = pci_get_drvdata(pdev);
10291 struct bnx2x *bp;
10292
10293 if (!dev) {
10294 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10295 return;
10296 }
10297	bp = netdev_priv(dev);
10298
10299 unregister_netdev(dev);
10300
10301 if (bp->regview)
10302 iounmap(bp->regview);
10303
10304 if (bp->doorbells)
10305 iounmap(bp->doorbells);
10306
10307 free_netdev(dev);
10308
10309 if (atomic_read(&pdev->enable_cnt) == 1)
10310 pci_release_regions(pdev);
10311
a2fbb9ea
ET
10312 pci_disable_device(pdev);
10313 pci_set_drvdata(pdev, NULL);
10314}
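
/* Legacy PM suspend hook: save PCI state, detach the interface, stop
 * the NIC via bnx2x_nic_unload(UNLOAD_CLOSE) and drop to the power
 * state chosen by pci_choose_state().
 */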
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}
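
/* Legacy PM resume hook: restore PCI state, return to D0 and reload
 * the NIC if the interface was running when we suspended.
 */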
static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
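
/* Lightweight teardown used once a PCI error has been detected: stop
 * the interface and timer, release the IRQs, invalidate the cached E1
 * multicast CAM entries and free driver-owned memory, without the
 * usual unload handshake with the firmware.
 */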
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length_6b; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
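
/* Re-discover the management-firmware interface after a slot reset:
 * re-read the shared memory base, sanity-check the MCP validity
 * signature and resync the firmware sequence number.
 */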
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
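
/* PCI error recovery (EEH) flow: the PCI core calls .error_detected
 * first, then .slot_reset once the slot has been reset, and finally
 * .resume when traffic may start flowing again.
 */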

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset = bnx2x_io_slot_reset,
	.resume = bnx2x_io_resume,
};
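
/* Tie the probe/remove, power-management and error-recovery callbacks
 * above to the PCI IDs in bnx2x_pci_tbl.
 */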
static struct pci_driver bnx2x_pci_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = bnx2x_pci_tbl,
	.probe = bnx2x_init_one,
	.remove = __devexit_p(bnx2x_remove_one),
	.suspend = bnx2x_suspend,
	.resume = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};
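
/* Module init: create the workqueue before registering the driver so
 * that it already exists if probing a device schedules work (e.g. the
 * slowpath task) onto it; bnx2x_cleanup() reverses the order.
 */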
static int __init bnx2x_init(void)
{
	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	return pci_register_driver(&bnx2x_pci_driver);
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);