/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION	"1.48.105-1"
#define DRV_MODULE_RELDATE	"2009/04/22"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_PREFIX_E1	"bnx2x-e1-"
#define FW_FILE_PREFIX_E1H	"bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Use per-CPU queues");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

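/* Note: all of the module parameters above are registered with perm 0,
 * so no sysfs entries are created and the values can only be given on
 * the module command line.  Illustrative invocation (example values,
 * not recommendations):
 *
 *	modprobe bnx2x multi_mode=1 int_mode=2 disable_tpa=1
 */
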
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

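/* DMA len32 dwords from host memory at dma_addr to GRC address dst_addr.
 * Falls back to indirect register writes while the DMAE block is not yet
 * ready; otherwise posts a single DMAE command and busy-waits, under
 * dmae_mutex, for the completion value to appear in the slowpath area.
 */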
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

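/* Counterpart of bnx2x_write_dmae(): DMA len32 dwords from GRC address
 * src_addr into the slowpath wb_data buffer, using indirect register
 * reads while the DMAE block is not ready.
 */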
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

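/* Scan the assert lists of the four storm processors (XSTORM, TSTORM,
 * CSTORM, USTORM) and print every valid entry; returns the number of
 * asserts found.
 */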
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

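/* Print the management firmware (MCP) trace buffer from the scratchpad
 * to the kernel log; "mark" delimits the current write position.
 */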
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

	printk(KERN_ERR PFX);
	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk(KERN_ERR PFX "end of fw dump\n");
}

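/* Dump driver state on a fatal error: status block indices, the Rx/Tx
 * ring pointers and the descriptors around the current consumers,
 * followed by the firmware trace and the storm assert lists.
 * Statistics are disabled first so they do not race with the dump.
 */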
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("      rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("      fp_c_idx(%x) *sb_c_idx(%x)"
			  " bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

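/* Program the HC configuration register of the current port according
 * to the interrupt mode in use (MSI-X, MSI or INTx) and, on E1H, set up
 * the leading/trailing edge registers for attention bits.
 */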
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");

}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

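/* Refresh the cached CSTORM/USTORM indices from the fastpath status
 * block.  Returns a bitmask: bit 0 - CSTORM index changed, bit 1 -
 * USTORM index changed; 0 means no new work.
 */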
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

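/* Number of Tx BDs still available on the ring; the NUM_TX_RINGS term
 * accounts for the "next-page" BDs, which never carry data.
 */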
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		__netif_tx_lock(txq, smp_processor_id());

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped().  Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}


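/* Handle a ramrod (slowpath command) completion delivered on the Rx
 * completion queue: advance the fastpath or global state machine
 * accordingly and return the slowpath queue credit (spq_left).
 */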
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

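/* Advance the SGE producer after a TPA aggregation completes: mark the
 * SGEs consumed by this CQE in the mask and move rx_sge_prod past every
 * fully-consumed mask element.
 */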
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

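/* Begin a TPA aggregation on the given queue (bin): the empty skb from
 * the pool is mapped and placed at the producer index, while the skb
 * holding the first aggregated data is parked in the pool until
 * bnx2x_tpa_stop() completes the aggregation.
 */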
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

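/* Complete a TPA aggregation: unmap the pooled skb, fix up its IP
 * checksum, attach the SGE pages as fragments and hand the result to
 * the stack.  If no replacement skb can be allocated, the aggregated
 * packet is dropped but the buffer stays in the bin.
 */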
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

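/* Main Rx fastpath loop (NAPI poll work): walk the completion queue up
 * to the given budget, dispatching slowpath CQEs to bnx2x_sp_event(),
 * handling TPA start/stop CQEs, and passing regular packets to the
 * stack.  The BD/CQE/SGE producers are then written back to the chip
 * via bnx2x_update_rx_prod().
 */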
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = fp->index;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	napi_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}

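/* INTx/MSI interrupt handler (a single vector shared by all events), as
 * opposed to bnx2x_msix_fp_int() above, which serves one fastpath queue
 * per MSI-X vector.  Bit 0 of the acked status indicates a slowpath
 * event, which is deferred to the sp_task workqueue.
 */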
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}


	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

c18487ee 1758/* end of fast path */
a2fbb9ea 1759
bb2a0f7a 1760static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1761
c18487ee
YR
1762/* Link */
1763
1764/*
1765 * General service functions
1766 */
a2fbb9ea 1767
4a37fb66 1768static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1769{
1770 u32 lock_status;
1771 u32 resource_bit = (1 << resource);
4a37fb66
YG
1772 int func = BP_FUNC(bp);
1773 u32 hw_lock_control_reg;
c18487ee 1774 int cnt;
a2fbb9ea 1775
c18487ee
YR
1776 /* Validating that the resource is within range */
1777 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1778 DP(NETIF_MSG_HW,
1779 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1780 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1781 return -EINVAL;
1782 }
a2fbb9ea 1783
4a37fb66
YG
1784 if (func <= 5) {
1785 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1786 } else {
1787 hw_lock_control_reg =
1788 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1789 }
1790
c18487ee 1791 /* Validating that the resource is not already taken */
4a37fb66 1792 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1793 if (lock_status & resource_bit) {
1794 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1795 lock_status, resource_bit);
1796 return -EEXIST;
1797 }
a2fbb9ea 1798
46230476
EG
 1799 /* Try for 5 seconds, polling every 5ms */
1800 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1801 /* Try to acquire the lock */
4a37fb66
YG
1802 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1803 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1804 if (lock_status & resource_bit)
1805 return 0;
a2fbb9ea 1806
c18487ee 1807 msleep(5);
a2fbb9ea 1808 }
c18487ee
YR
1809 DP(NETIF_MSG_HW, "Timeout\n");
1810 return -EAGAIN;
1811}
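/* The HW lock registers come in pairs: writing the resource bit to
 * hw_lock_control_reg + 4 (above) attempts to take the lock and the
 * read-back shows whether we got it; writing the same bit to the base
 * register (in bnx2x_release_hw_lock() below) releases it. */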
a2fbb9ea 1812
4a37fb66 1813static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1814{
1815 u32 lock_status;
1816 u32 resource_bit = (1 << resource);
4a37fb66
YG
1817 int func = BP_FUNC(bp);
1818 u32 hw_lock_control_reg;
a2fbb9ea 1819
c18487ee
YR
1820 /* Validating that the resource is within range */
1821 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1822 DP(NETIF_MSG_HW,
1823 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1824 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1825 return -EINVAL;
1826 }
1827
4a37fb66
YG
1828 if (func <= 5) {
1829 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1830 } else {
1831 hw_lock_control_reg =
1832 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1833 }
1834
c18487ee 1835 /* Validating that the resource is currently taken */
4a37fb66 1836 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1837 if (!(lock_status & resource_bit)) {
1838 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1839 lock_status, resource_bit);
1840 return -EFAULT;
a2fbb9ea
ET
1841 }
1842
4a37fb66 1843 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1844 return 0;
1845}
1846
1847/* HW Lock for shared dual port PHYs */
4a37fb66 1848static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee 1849{
34f80b04 1850 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1851
46c6a674
EG
1852 if (bp->port.need_hw_lock)
1853 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
c18487ee 1854}
a2fbb9ea 1855
4a37fb66 1856static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee 1857{
46c6a674
EG
1858 if (bp->port.need_hw_lock)
1859 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
a2fbb9ea 1860
34f80b04 1861 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1862}
a2fbb9ea 1863
4acac6a5
EG
1864int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1865{
1866 /* The GPIO should be swapped if swap register is set and active */
1867 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1868 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
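	/* Both the port-swap strap and its override must be set for the
	 * swap to be active; XOR-ing with the port then selects the other
	 * port's GPIO bank. The same mapping is repeated in
	 * bnx2x_set_gpio() and bnx2x_set_gpio_int() below. */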
1869 int gpio_shift = gpio_num +
1870 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1871 u32 gpio_mask = (1 << gpio_shift);
1872 u32 gpio_reg;
1873 int value;
1874
1875 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1876 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1877 return -EINVAL;
1878 }
1879
1880 /* read GPIO value */
1881 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1882
1883 /* get the requested pin value */
1884 if ((gpio_reg & gpio_mask) == gpio_mask)
1885 value = 1;
1886 else
1887 value = 0;
1888
1889 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1890
1891 return value;
1892}
1893
17de50b7 1894int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
1895{
1896 /* The GPIO should be swapped if swap register is set and active */
1897 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1898 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
1899 int gpio_shift = gpio_num +
1900 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1901 u32 gpio_mask = (1 << gpio_shift);
1902 u32 gpio_reg;
a2fbb9ea 1903
c18487ee
YR
1904 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1905 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1906 return -EINVAL;
1907 }
a2fbb9ea 1908
4a37fb66 1909 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
1910 /* read GPIO and mask except the float bits */
1911 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1912
c18487ee
YR
1913 switch (mode) {
1914 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1915 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1916 gpio_num, gpio_shift);
1917 /* clear FLOAT and set CLR */
1918 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1919 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1920 break;
a2fbb9ea 1921
c18487ee
YR
1922 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1923 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1924 gpio_num, gpio_shift);
1925 /* clear FLOAT and set SET */
1926 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1927 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1928 break;
a2fbb9ea 1929
17de50b7 1930 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
1931 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1932 gpio_num, gpio_shift);
1933 /* set FLOAT */
1934 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1935 break;
a2fbb9ea 1936
c18487ee
YR
1937 default:
1938 break;
a2fbb9ea
ET
1939 }
1940
c18487ee 1941 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 1942 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 1943
c18487ee 1944 return 0;
a2fbb9ea
ET
1945}
1946
4acac6a5
EG
1947int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1948{
1949 /* The GPIO should be swapped if swap register is set and active */
1950 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1951 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1952 int gpio_shift = gpio_num +
1953 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1954 u32 gpio_mask = (1 << gpio_shift);
1955 u32 gpio_reg;
1956
1957 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1958 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1959 return -EINVAL;
1960 }
1961
1962 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1963 /* read GPIO int */
1964 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1965
1966 switch (mode) {
1967 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1968 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1969 "output low\n", gpio_num, gpio_shift);
1970 /* clear SET and set CLR */
1971 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1972 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1973 break;
1974
1975 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1976 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1977 "output high\n", gpio_num, gpio_shift);
1978 /* clear CLR and set SET */
1979 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1980 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1981 break;
1982
1983 default:
1984 break;
1985 }
1986
1987 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1988 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1989
1990 return 0;
1991}
1992
c18487ee 1993static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 1994{
c18487ee
YR
1995 u32 spio_mask = (1 << spio_num);
1996 u32 spio_reg;
a2fbb9ea 1997
c18487ee
YR
1998 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1999 (spio_num > MISC_REGISTERS_SPIO_7)) {
2000 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2001 return -EINVAL;
a2fbb9ea
ET
2002 }
2003
4a37fb66 2004 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
2005 /* read SPIO and mask except the float bits */
2006 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 2007
c18487ee 2008 switch (mode) {
6378c025 2009 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
2010 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2011 /* clear FLOAT and set CLR */
2012 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2013 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2014 break;
a2fbb9ea 2015
6378c025 2016 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
2017 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2018 /* clear FLOAT and set SET */
2019 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2020 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2021 break;
a2fbb9ea 2022
c18487ee
YR
2023 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2024 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2025 /* set FLOAT */
2026 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2027 break;
a2fbb9ea 2028
c18487ee
YR
2029 default:
2030 break;
a2fbb9ea
ET
2031 }
2032
c18487ee 2033 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 2034 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 2035
a2fbb9ea
ET
2036 return 0;
2037}
2038
c18487ee 2039static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 2040{
ad33ea3a
EG
2041 switch (bp->link_vars.ieee_fc &
2042 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 2043 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 2044 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2045 ADVERTISED_Pause);
2046 break;
356e2385 2047
c18487ee 2048 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 2049 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
2050 ADVERTISED_Pause);
2051 break;
356e2385 2052
c18487ee 2053 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 2054 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee 2055 break;
356e2385 2056
c18487ee 2057 default:
34f80b04 2058 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2059 ADVERTISED_Pause);
2060 break;
2061 }
2062}
f1410647 2063
c18487ee
YR
2064static void bnx2x_link_report(struct bnx2x *bp)
2065{
2066 if (bp->link_vars.link_up) {
2067 if (bp->state == BNX2X_STATE_OPEN)
2068 netif_carrier_on(bp->dev);
2069 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 2070
c18487ee 2071 printk("%d Mbps ", bp->link_vars.line_speed);
f1410647 2072
c18487ee
YR
2073 if (bp->link_vars.duplex == DUPLEX_FULL)
2074 printk("full duplex");
2075 else
2076 printk("half duplex");
f1410647 2077
c0700f90
DM
2078 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2079 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
c18487ee 2080 printk(", receive ");
356e2385
EG
2081 if (bp->link_vars.flow_ctrl &
2082 BNX2X_FLOW_CTRL_TX)
c18487ee
YR
2083 printk("& transmit ");
2084 } else {
2085 printk(", transmit ");
2086 }
2087 printk("flow control ON");
2088 }
2089 printk("\n");
f1410647 2090
c18487ee
YR
2091 } else { /* link_down */
2092 netif_carrier_off(bp->dev);
2093 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 2094 }
c18487ee
YR
2095}
2096
b5bf9068 2097static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 2098{
19680c48
EG
2099 if (!BP_NOMCP(bp)) {
2100 u8 rc;
a2fbb9ea 2101
19680c48 2102 /* Initialize link parameters structure variables */
8c99e7b0
YR
2103 /* It is recommended to turn off RX FC for jumbo frames
2104 for better performance */
2105 if (IS_E1HMF(bp))
c0700f90 2106 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
8c99e7b0 2107 else if (bp->dev->mtu > 5000)
c0700f90 2108 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 2109 else
c0700f90 2110 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 2111
4a37fb66 2112 bnx2x_acquire_phy_lock(bp);
b5bf9068
EG
2113
2114 if (load_mode == LOAD_DIAG)
2115 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2116
19680c48 2117 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 2118
4a37fb66 2119 bnx2x_release_phy_lock(bp);
a2fbb9ea 2120
3c96c68b
EG
2121 bnx2x_calc_fc_adv(bp);
2122
b5bf9068
EG
2123 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2124 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 2125 bnx2x_link_report(bp);
b5bf9068 2126 }
34f80b04 2127
19680c48
EG
2128 return rc;
2129 }
f5372251 2130 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
19680c48 2131 return -EINVAL;
a2fbb9ea
ET
2132}
2133
c18487ee 2134static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2135{
19680c48 2136 if (!BP_NOMCP(bp)) {
4a37fb66 2137 bnx2x_acquire_phy_lock(bp);
19680c48 2138 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2139 bnx2x_release_phy_lock(bp);
a2fbb9ea 2140
19680c48
EG
2141 bnx2x_calc_fc_adv(bp);
2142 } else
f5372251 2143 BNX2X_ERR("Bootcode is missing - can not set link\n");
c18487ee 2144}
a2fbb9ea 2145
c18487ee
YR
2146static void bnx2x__link_reset(struct bnx2x *bp)
2147{
19680c48 2148 if (!BP_NOMCP(bp)) {
4a37fb66 2149 bnx2x_acquire_phy_lock(bp);
589abe3a 2150 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 2151 bnx2x_release_phy_lock(bp);
19680c48 2152 } else
f5372251 2153 BNX2X_ERR("Bootcode is missing - can not reset link\n");
c18487ee 2154}
a2fbb9ea 2155
c18487ee
YR
2156static u8 bnx2x_link_test(struct bnx2x *bp)
2157{
2158 u8 rc;
a2fbb9ea 2159
4a37fb66 2160 bnx2x_acquire_phy_lock(bp);
c18487ee 2161 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 2162 bnx2x_release_phy_lock(bp);
a2fbb9ea 2163
c18487ee
YR
2164 return rc;
2165}
a2fbb9ea 2166
8a1c38d1 2167static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 2168{
8a1c38d1
EG
2169 u32 r_param = bp->link_vars.line_speed / 8;
2170 u32 fair_periodic_timeout_usec;
2171 u32 t_fair;
34f80b04 2172
8a1c38d1
EG
2173 memset(&(bp->cmng.rs_vars), 0,
2174 sizeof(struct rate_shaping_vars_per_port));
2175 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 2176
8a1c38d1
EG
2177 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2178 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 2179
8a1c38d1
EG
 2180 /* this is the threshold below which no timer arming will occur;
 2181 the 1.25 coefficient makes the threshold a little bigger
 2182 than the real time, to compensate for timer inaccuracy */
2183 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
2184 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2185
8a1c38d1
EG
2186 /* resolution of fairness timer */
2187 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2188 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2189 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 2190
8a1c38d1
EG
2191 /* this is the threshold below which we won't arm the timer anymore */
2192 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 2193
8a1c38d1
EG
 2194 /* we multiply by 1e3/8 to get bytes/msec.
 2195 We don't want the credit to exceed
 2196 t_fair*FAIR_MEM (the algorithm resolution) */
2197 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2198 /* since each tick is 4 usec */
2199 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
2200}
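/* Worked example for the min/max init above, assuming line_speed is in
 * Mbps (the 10G/1G note implies T_FAIR_COEF = 10^7): at 10G,
 * r_param = 10000/8 = 1250 bytes/usec and t_fair = 1000 usec, so the
 * credit upper bound is 1250 * 1000 * FAIR_MEM bytes; the trailing
 * divisions by 4 convert usec timeouts into 4-usec SDM ticks. */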
2201
8a1c38d1 2202static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
34f80b04
EG
2203{
2204 struct rate_shaping_vars_per_vn m_rs_vn;
2205 struct fairness_vars_per_vn m_fair_vn;
2206 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2207 u16 vn_min_rate, vn_max_rate;
2208 int i;
2209
2210 /* If function is hidden - set min and max to zeroes */
2211 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2212 vn_min_rate = 0;
2213 vn_max_rate = 0;
2214
2215 } else {
2216 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2217 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
8a1c38d1 2218 /* If fairness is enabled (not all min rates are zero) and
34f80b04 2219 the current min rate is zero, set it to the default minimum.
33471629 2220 This is a requirement of the algorithm. */
8a1c38d1 2221 if (bp->vn_weight_sum && (vn_min_rate == 0))
34f80b04
EG
2222 vn_min_rate = DEF_MIN_RATE;
2223 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2224 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2225 }
2226
8a1c38d1
EG
2227 DP(NETIF_MSG_IFUP,
2228 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2229 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
2230
2231 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2232 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2233
2234 /* global vn counter - maximal Mbps for this vn */
2235 m_rs_vn.vn_counter.rate = vn_max_rate;
2236
2237 /* quota - number of bytes transmitted in this period */
2238 m_rs_vn.vn_counter.quota =
2239 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2240
8a1c38d1 2241 if (bp->vn_weight_sum) {
34f80b04
EG
2242 /* credit for each period of the fairness algorithm:
 2243 number of bytes in T_FAIR (the VNs share the port rate).
8a1c38d1
EG
2244 vn_weight_sum should not be larger than 10000, thus
2245 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2246 than zero */
34f80b04 2247 m_fair_vn.vn_credit_delta =
8a1c38d1
EG
2248 max((u32)(vn_min_rate * (T_FAIR_COEF /
2249 (8 * bp->vn_weight_sum))),
2250 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
34f80b04
EG
2251 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2252 m_fair_vn.vn_credit_delta);
2253 }
2254
34f80b04
EG
2255 /* Store it to internal memory */
2256 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2257 REG_WR(bp, BAR_XSTRORM_INTMEM +
2258 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2259 ((u32 *)(&m_rs_vn))[i]);
2260
2261 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2262 REG_WR(bp, BAR_XSTRORM_INTMEM +
2263 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2264 ((u32 *)(&m_fair_vn))[i]);
2265}
2266
8a1c38d1 2267
c18487ee
YR
2268/* This function is called upon link interrupt */
2269static void bnx2x_link_attn(struct bnx2x *bp)
2270{
bb2a0f7a
YG
2271 /* Make sure that we are synced with the current statistics */
2272 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2273
c18487ee 2274 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2275
bb2a0f7a
YG
2276 if (bp->link_vars.link_up) {
2277
1c06328c
EG
2278 /* dropless flow control */
2279 if (CHIP_IS_E1H(bp)) {
2280 int port = BP_PORT(bp);
2281 u32 pause_enabled = 0;
2282
2283 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2284 pause_enabled = 1;
2285
2286 REG_WR(bp, BAR_USTRORM_INTMEM +
2287 USTORM_PAUSE_ENABLED_OFFSET(port),
2288 pause_enabled);
2289 }
2290
bb2a0f7a
YG
2291 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2292 struct host_port_stats *pstats;
2293
2294 pstats = bnx2x_sp(bp, port_stats);
2295 /* reset old bmac stats */
2296 memset(&(pstats->mac_stx[0]), 0,
2297 sizeof(struct mac_stx));
2298 }
2299 if ((bp->state == BNX2X_STATE_OPEN) ||
2300 (bp->state == BNX2X_STATE_DISABLED))
2301 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2302 }
2303
c18487ee
YR
2304 /* indicate link status */
2305 bnx2x_link_report(bp);
34f80b04
EG
2306
2307 if (IS_E1HMF(bp)) {
8a1c38d1 2308 int port = BP_PORT(bp);
34f80b04 2309 int func;
8a1c38d1 2310 int vn;
34f80b04
EG
2311
2312 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2313 if (vn == BP_E1HVN(bp))
2314 continue;
2315
8a1c38d1 2316 func = ((vn << 1) | port);
34f80b04
EG
2317
2318 /* Set the attention towards other drivers
2319 on the same port */
2320 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2321 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2322 }
34f80b04 2323
8a1c38d1
EG
2324 if (bp->link_vars.link_up) {
2325 int i;
2326
2327 /* Init rate shaping and fairness contexts */
2328 bnx2x_init_port_minmax(bp);
34f80b04 2329
34f80b04 2330 for (vn = VN_0; vn < E1HVN_MAX; vn++)
8a1c38d1
EG
2331 bnx2x_init_vn_minmax(bp, 2*vn + port);
2332
2333 /* Store it to internal memory */
2334 for (i = 0;
2335 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2336 REG_WR(bp, BAR_XSTRORM_INTMEM +
2337 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2338 ((u32 *)(&bp->cmng))[i]);
2339 }
34f80b04 2340 }
c18487ee 2341}
a2fbb9ea 2342
c18487ee
YR
2343static void bnx2x__link_status_update(struct bnx2x *bp)
2344{
2345 if (bp->state != BNX2X_STATE_OPEN)
2346 return;
a2fbb9ea 2347
c18487ee 2348 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2349
bb2a0f7a
YG
2350 if (bp->link_vars.link_up)
2351 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2352 else
2353 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2354
c18487ee
YR
2355 /* indicate link status */
2356 bnx2x_link_report(bp);
a2fbb9ea 2357}
a2fbb9ea 2358
34f80b04
EG
2359static void bnx2x_pmf_update(struct bnx2x *bp)
2360{
2361 int port = BP_PORT(bp);
2362 u32 val;
2363
2364 bp->port.pmf = 1;
2365 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2366
2367 /* enable nig attention */
2368 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2369 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2370 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2371
2372 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2373}
2374
c18487ee 2375/* end of Link */
a2fbb9ea
ET
2376
2377/* slow path */
2378
2379/*
2380 * General service functions
2381 */
2382
2383/* the slow path queue is odd since completions arrive on the fastpath ring */
2384static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2385 u32 data_hi, u32 data_lo, int common)
2386{
34f80b04 2387 int func = BP_FUNC(bp);
a2fbb9ea 2388
34f80b04
EG
2389 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2390 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
2391 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2392 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2393 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2394
2395#ifdef BNX2X_STOP_ON_ERROR
2396 if (unlikely(bp->panic))
2397 return -EIO;
2398#endif
2399
34f80b04 2400 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2401
2402 if (!bp->spq_left) {
2403 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2404 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2405 bnx2x_panic();
2406 return -EBUSY;
2407 }
f1410647 2408
a2fbb9ea
ET
 2409 /* CID needs port number to be encoded in it */
2410 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2411 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2412 HW_CID(bp, cid)));
2413 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2414 if (common)
2415 bp->spq_prod_bd->hdr.type |=
2416 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2417
2418 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2419 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2420
2421 bp->spq_left--;
2422
2423 if (bp->spq_prod_bd == bp->spq_last_bd) {
2424 bp->spq_prod_bd = bp->spq;
2425 bp->spq_prod_idx = 0;
2426 DP(NETIF_MSG_TIMER, "end of spq\n");
2427
2428 } else {
2429 bp->spq_prod_bd++;
2430 bp->spq_prod_idx++;
2431 }
2432
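	/* Publish the new producer index to the XSTORM while still
	 * holding spq_lock; firmware consumes SPQ entries up to this
	 * index. */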
34f80b04 2433 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
2434 bp->spq_prod_idx);
2435
34f80b04 2436 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2437 return 0;
2438}
2439
2440/* acquire split MCP access lock register */
4a37fb66 2441static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2442{
a2fbb9ea 2443 u32 i, j, val;
34f80b04 2444 int rc = 0;
a2fbb9ea
ET
2445
2446 might_sleep();
2447 i = 100;
2448 for (j = 0; j < i*10; j++) {
2449 val = (1UL << 31);
2450 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2451 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2452 if (val & (1L << 31))
2453 break;
2454
2455 msleep(5);
2456 }
a2fbb9ea 2457 if (!(val & (1L << 31))) {
19680c48 2458 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2459 rc = -EBUSY;
2460 }
2461
2462 return rc;
2463}
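/* The ALR handshake: writing bit 31 to GRCBASE_MCP + 0x9c requests the
 * split lock, and the bit reads back as set only once the lock is
 * held; the loop above polls for up to ~5 seconds (1000 x 5ms). */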
2464
4a37fb66
YG
2465/* release split MCP access lock register */
2466static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea
ET
2467{
2468 u32 val = 0;
2469
2470 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2471}
2472
2473static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2474{
2475 struct host_def_status_block *def_sb = bp->def_status_blk;
2476 u16 rc = 0;
2477
2478 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2479 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2480 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2481 rc |= 1;
2482 }
2483 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2484 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2485 rc |= 2;
2486 }
2487 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2488 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2489 rc |= 4;
2490 }
2491 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2492 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2493 rc |= 8;
2494 }
2495 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2496 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2497 rc |= 16;
2498 }
2499 return rc;
2500}
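/* The return value is a bitmap of which default-SB indices moved:
 * 1 = attention bits, 2 = CSTORM, 4 = USTORM, 8 = XSTORM, 16 = TSTORM.
 * bnx2x_sp_task() only acts on bit 0 (attentions) and acks the other
 * indices unconditionally. */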
2501
2502/*
2503 * slow path service functions
2504 */
2505
2506static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2507{
34f80b04 2508 int port = BP_PORT(bp);
5c862848
EG
2509 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2510 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
2511 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2512 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2513 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2514 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2515 u32 aeu_mask;
87942b46 2516 u32 nig_mask = 0;
a2fbb9ea 2517
a2fbb9ea
ET
2518 if (bp->attn_state & asserted)
2519 BNX2X_ERR("IGU ERROR\n");
2520
3fcaf2e5
EG
2521 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2522 aeu_mask = REG_RD(bp, aeu_addr);
2523
a2fbb9ea 2524 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5
EG
2525 aeu_mask, asserted);
2526 aeu_mask &= ~(asserted & 0xff);
2527 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2528
3fcaf2e5
EG
2529 REG_WR(bp, aeu_addr, aeu_mask);
2530 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2531
3fcaf2e5 2532 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2533 bp->attn_state |= asserted;
3fcaf2e5 2534 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2535
2536 if (asserted & ATTN_HARD_WIRED_MASK) {
2537 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2538
a5e9a7cf
EG
2539 bnx2x_acquire_phy_lock(bp);
2540
877e9aa4 2541 /* save nig interrupt mask */
87942b46 2542 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2543 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2544
c18487ee 2545 bnx2x_link_attn(bp);
a2fbb9ea
ET
2546
2547 /* handle unicore attn? */
2548 }
2549 if (asserted & ATTN_SW_TIMER_4_FUNC)
2550 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2551
2552 if (asserted & GPIO_2_FUNC)
2553 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2554
2555 if (asserted & GPIO_3_FUNC)
2556 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2557
2558 if (asserted & GPIO_4_FUNC)
2559 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2560
2561 if (port == 0) {
2562 if (asserted & ATTN_GENERAL_ATTN_1) {
2563 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2564 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2565 }
2566 if (asserted & ATTN_GENERAL_ATTN_2) {
2567 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2568 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2569 }
2570 if (asserted & ATTN_GENERAL_ATTN_3) {
2571 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2572 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2573 }
2574 } else {
2575 if (asserted & ATTN_GENERAL_ATTN_4) {
2576 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2577 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2578 }
2579 if (asserted & ATTN_GENERAL_ATTN_5) {
2580 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2581 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2582 }
2583 if (asserted & ATTN_GENERAL_ATTN_6) {
2584 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2585 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2586 }
2587 }
2588
2589 } /* if hardwired */
2590
5c862848
EG
2591 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2592 asserted, hc_addr);
2593 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
2594
2595 /* now set back the mask */
a5e9a7cf 2596 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2597 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2598 bnx2x_release_phy_lock(bp);
2599 }
a2fbb9ea
ET
2600}
2601
877e9aa4 2602static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2603{
34f80b04 2604 int port = BP_PORT(bp);
877e9aa4
ET
2605 int reg_offset;
2606 u32 val;
2607
34f80b04
EG
2608 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2609 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2610
34f80b04 2611 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2612
2613 val = REG_RD(bp, reg_offset);
2614 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2615 REG_WR(bp, reg_offset, val);
2616
2617 BNX2X_ERR("SPIO5 hw attention\n");
2618
35b19ba5
EG
2619 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2620 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
877e9aa4
ET
2621 /* Fan failure attention */
2622
17de50b7 2623 /* The PHY reset is controlled by GPIO 1 */
877e9aa4 2624 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
17de50b7
EG
2625 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2626 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2627 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2628 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4 2629 /* mark the failure */
c18487ee 2630 bp->link_params.ext_phy_config &=
877e9aa4 2631 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
c18487ee 2632 bp->link_params.ext_phy_config |=
877e9aa4
ET
2633 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2634 SHMEM_WR(bp,
2635 dev_info.port_hw_config[port].
2636 external_phy_config,
c18487ee 2637 bp->link_params.ext_phy_config);
877e9aa4
ET
2638 /* log the failure */
2639 printk(KERN_ERR PFX "Fan Failure on Network"
2640 " Controller %s has caused the driver to"
2641 " shutdown the card to prevent permanent"
2642 " damage. Please contact Dell Support for"
2643 " assistance\n", bp->dev->name);
2644 break;
2645
2646 default:
2647 break;
2648 }
2649 }
34f80b04 2650
589abe3a
EG
2651 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2652 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2653 bnx2x_acquire_phy_lock(bp);
2654 bnx2x_handle_module_detect_int(&bp->link_params);
2655 bnx2x_release_phy_lock(bp);
2656 }
2657
34f80b04
EG
2658 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2659
2660 val = REG_RD(bp, reg_offset);
2661 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2662 REG_WR(bp, reg_offset, val);
2663
2664 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2665 (attn & HW_INTERRUT_ASSERT_SET_0));
2666 bnx2x_panic();
2667 }
877e9aa4
ET
2668}
2669
2670static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2671{
2672 u32 val;
2673
0626b899 2674 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
2675
2676 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2677 BNX2X_ERR("DB hw attention 0x%x\n", val);
2678 /* DORQ discard attention */
2679 if (val & 0x2)
2680 BNX2X_ERR("FATAL error from DORQ\n");
2681 }
34f80b04
EG
2682
2683 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2684
2685 int port = BP_PORT(bp);
2686 int reg_offset;
2687
2688 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2689 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2690
2691 val = REG_RD(bp, reg_offset);
2692 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2693 REG_WR(bp, reg_offset, val);
2694
2695 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2696 (attn & HW_INTERRUT_ASSERT_SET_1));
2697 bnx2x_panic();
2698 }
877e9aa4
ET
2699}
2700
2701static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2702{
2703 u32 val;
2704
2705 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2706
2707 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2708 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2709 /* CFC error attention */
2710 if (val & 0x2)
2711 BNX2X_ERR("FATAL error from CFC\n");
2712 }
2713
2714 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2715
2716 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2717 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2718 /* RQ_USDMDP_FIFO_OVERFLOW */
2719 if (val & 0x18000)
2720 BNX2X_ERR("FATAL error from PXP\n");
2721 }
34f80b04
EG
2722
2723 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2724
2725 int port = BP_PORT(bp);
2726 int reg_offset;
2727
2728 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2729 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2730
2731 val = REG_RD(bp, reg_offset);
2732 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2733 REG_WR(bp, reg_offset, val);
2734
2735 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2736 (attn & HW_INTERRUT_ASSERT_SET_2));
2737 bnx2x_panic();
2738 }
877e9aa4
ET
2739}
2740
2741static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2742{
34f80b04
EG
2743 u32 val;
2744
877e9aa4
ET
2745 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2746
34f80b04
EG
2747 if (attn & BNX2X_PMF_LINK_ASSERT) {
2748 int func = BP_FUNC(bp);
2749
2750 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2751 bnx2x__link_status_update(bp);
2752 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2753 DRV_STATUS_PMF)
2754 bnx2x_pmf_update(bp);
2755
2756 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
2757
2758 BNX2X_ERR("MC assert!\n");
2759 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2760 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2761 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2762 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2763 bnx2x_panic();
2764
2765 } else if (attn & BNX2X_MCP_ASSERT) {
2766
2767 BNX2X_ERR("MCP assert!\n");
2768 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 2769 bnx2x_fw_dump(bp);
877e9aa4
ET
2770
2771 } else
2772 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2773 }
2774
2775 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
2776 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2777 if (attn & BNX2X_GRC_TIMEOUT) {
2778 val = CHIP_IS_E1H(bp) ?
2779 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2780 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2781 }
2782 if (attn & BNX2X_GRC_RSV) {
2783 val = CHIP_IS_E1H(bp) ?
2784 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2785 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2786 }
877e9aa4 2787 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
2788 }
2789}
2790
2791static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2792{
a2fbb9ea
ET
2793 struct attn_route attn;
2794 struct attn_route group_mask;
34f80b04 2795 int port = BP_PORT(bp);
877e9aa4 2796 int index;
a2fbb9ea
ET
2797 u32 reg_addr;
2798 u32 val;
3fcaf2e5 2799 u32 aeu_mask;
a2fbb9ea
ET
2800
 2801 /* need to take the HW lock because the MCP or the other port
 2802 might also try to handle this event */
4a37fb66 2803 bnx2x_acquire_alr(bp);
a2fbb9ea
ET
2804
2805 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2806 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2807 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2808 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
2809 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2810 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
2811
2812 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2813 if (deasserted & (1 << index)) {
2814 group_mask = bp->attn_group[index];
2815
34f80b04
EG
2816 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2817 index, group_mask.sig[0], group_mask.sig[1],
2818 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 2819
877e9aa4
ET
2820 bnx2x_attn_int_deasserted3(bp,
2821 attn.sig[3] & group_mask.sig[3]);
2822 bnx2x_attn_int_deasserted1(bp,
2823 attn.sig[1] & group_mask.sig[1]);
2824 bnx2x_attn_int_deasserted2(bp,
2825 attn.sig[2] & group_mask.sig[2]);
2826 bnx2x_attn_int_deasserted0(bp,
2827 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 2828
a2fbb9ea
ET
2829 if ((attn.sig[0] & group_mask.sig[0] &
2830 HW_PRTY_ASSERT_SET_0) ||
2831 (attn.sig[1] & group_mask.sig[1] &
2832 HW_PRTY_ASSERT_SET_1) ||
2833 (attn.sig[2] & group_mask.sig[2] &
2834 HW_PRTY_ASSERT_SET_2))
6378c025 2835 BNX2X_ERR("FATAL HW block parity attention\n");
a2fbb9ea
ET
2836 }
2837 }
2838
4a37fb66 2839 bnx2x_release_alr(bp);
a2fbb9ea 2840
5c862848 2841 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
a2fbb9ea
ET
2842
2843 val = ~deasserted;
3fcaf2e5
EG
2844 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2845 val, reg_addr);
5c862848 2846 REG_WR(bp, reg_addr, val);
a2fbb9ea 2847
a2fbb9ea 2848 if (~bp->attn_state & deasserted)
3fcaf2e5 2849 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
2850
2851 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2852 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2853
3fcaf2e5
EG
2854 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2855 aeu_mask = REG_RD(bp, reg_addr);
2856
2857 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2858 aeu_mask, deasserted);
2859 aeu_mask |= (deasserted & 0xff);
2860 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2861
3fcaf2e5
EG
2862 REG_WR(bp, reg_addr, aeu_mask);
2863 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
2864
2865 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2866 bp->attn_state &= ~deasserted;
2867 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2868}
2869
2870static void bnx2x_attn_int(struct bnx2x *bp)
2871{
2872 /* read local copy of bits */
68d59484
EG
2873 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2874 attn_bits);
2875 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2876 attn_bits_ack);
a2fbb9ea
ET
2877 u32 attn_state = bp->attn_state;
2878
2879 /* look for changed bits */
2880 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2881 u32 deasserted = ~attn_bits & attn_ack & attn_state;
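	/* Per-bit classification: "asserted" means newly raised (set in
	 * attn_bits but not yet acked or tracked in attn_state), while
	 * "deasserted" means newly cleared (clear in attn_bits but still
	 * acked and tracked). Bits where attn_bits == attn_ack are
	 * stable; the check below flags any stable bit that disagrees
	 * with attn_state. */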
2882
2883 DP(NETIF_MSG_HW,
2884 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2885 attn_bits, attn_ack, asserted, deasserted);
2886
2887 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 2888 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
2889
2890 /* handle bits that were raised */
2891 if (asserted)
2892 bnx2x_attn_int_asserted(bp, asserted);
2893
2894 if (deasserted)
2895 bnx2x_attn_int_deasserted(bp, deasserted);
2896}
2897
2898static void bnx2x_sp_task(struct work_struct *work)
2899{
1cf167f2 2900 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
2901 u16 status;
2902
34f80b04 2903
a2fbb9ea
ET
2904 /* Return here if interrupt is disabled */
2905 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2906 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2907 return;
2908 }
2909
2910 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
2911/* if (status == 0) */
2912/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 2913
3196a88a 2914 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 2915
877e9aa4
ET
2916 /* HW attentions */
2917 if (status & 0x1)
a2fbb9ea 2918 bnx2x_attn_int(bp);
a2fbb9ea 2919
68d59484 2920 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
a2fbb9ea
ET
2921 IGU_INT_NOP, 1);
2922 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2923 IGU_INT_NOP, 1);
2924 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2925 IGU_INT_NOP, 1);
2926 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2927 IGU_INT_NOP, 1);
2928 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2929 IGU_INT_ENABLE, 1);
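	/* The first four acks use IGU_INT_NOP; only the final TSTORM ack
	 * passes IGU_INT_ENABLE, so the default SB interrupt is re-armed
	 * exactly once, after all the slow path work is done. */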
877e9aa4 2930
a2fbb9ea
ET
2931}
2932
2933static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2934{
2935 struct net_device *dev = dev_instance;
2936 struct bnx2x *bp = netdev_priv(dev);
2937
2938 /* Return here if interrupt is disabled */
2939 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2940 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2941 return IRQ_HANDLED;
2942 }
2943
8d9c5f34 2944 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
2945
2946#ifdef BNX2X_STOP_ON_ERROR
2947 if (unlikely(bp->panic))
2948 return IRQ_HANDLED;
2949#endif
2950
1cf167f2 2951 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
2952
2953 return IRQ_HANDLED;
2954}
2955
2956/* end of slow path */
2957
2958/* Statistics */
2959
2960/****************************************************************************
2961* Macros
2962****************************************************************************/
2963
a2fbb9ea
ET
2964/* sum[hi:lo] += add[hi:lo] */
2965#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2966 do { \
2967 s_lo += a_lo; \
f5ba6772 2968 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
a2fbb9ea
ET
2969 } while (0)
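/* Carry example: with s = 0x00000000ffffffff and a = 1, s_lo wraps to
 * 0, the (s_lo < a_lo) test detects the overflow, and 1 is carried
 * into s_hi, giving 0x0000000100000000. */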
2970
2971/* difference = minuend - subtrahend */
2972#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2973 do { \
bb2a0f7a
YG
2974 if (m_lo < s_lo) { \
2975 /* underflow */ \
a2fbb9ea 2976 d_hi = m_hi - s_hi; \
bb2a0f7a 2977 if (d_hi > 0) { \
6378c025 2978 /* we can 'borrow' 1 */
a2fbb9ea
ET
2979 d_hi--; \
2980 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 2981 } else { \
6378c025 2982 /* m_hi <= s_hi */ \
a2fbb9ea
ET
2983 d_hi = 0; \
2984 d_lo = 0; \
2985 } \
bb2a0f7a
YG
2986 } else { \
2987 /* m_lo >= s_lo */ \
a2fbb9ea 2988 if (m_hi < s_hi) { \
bb2a0f7a
YG
2989 d_hi = 0; \
2990 d_lo = 0; \
2991 } else { \
6378c025 2992 /* m_hi >= s_hi */ \
bb2a0f7a
YG
2993 d_hi = m_hi - s_hi; \
2994 d_lo = m_lo - s_lo; \
a2fbb9ea
ET
2995 } \
2996 } \
2997 } while (0)
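/* Borrow example: m = 0x0000000100000000 minus s = 0x0000000000000001
 * takes the (m_lo < s_lo) branch, borrows from d_hi (1 -> 0) and
 * yields d_lo = UINT_MAX. If the subtrahend exceeds the minuend, the
 * result saturates to 0:0 instead of going negative. */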
2998
bb2a0f7a 2999#define UPDATE_STAT64(s, t) \
a2fbb9ea 3000 do { \
bb2a0f7a
YG
3001 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3002 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3003 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3004 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3005 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3006 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea
ET
3007 } while (0)
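/* mac_stx[0] holds the latest raw HW snapshot, so the next delta can
 * be computed even after the MAC counters are cleared (see the
 * "reset old bmac stats" memset on link up); mac_stx[1] accumulates
 * the deltas and is what actually gets reported. */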
3008
bb2a0f7a 3009#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 3010 do { \
bb2a0f7a
YG
3011 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3012 diff.lo, new->s##_lo, old->s##_lo); \
3013 ADD_64(estats->t##_hi, diff.hi, \
3014 estats->t##_lo, diff.lo); \
a2fbb9ea
ET
3015 } while (0)
3016
3017/* sum[hi:lo] += add */
3018#define ADD_EXTEND_64(s_hi, s_lo, a) \
3019 do { \
3020 s_lo += a; \
3021 s_hi += (s_lo < a) ? 1 : 0; \
3022 } while (0)
3023
bb2a0f7a 3024#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 3025 do { \
bb2a0f7a
YG
3026 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3027 pstats->mac_stx[1].s##_lo, \
3028 new->s); \
a2fbb9ea
ET
3029 } while (0)
3030
bb2a0f7a 3031#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea 3032 do { \
4781bfad
EG
3033 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3034 old_tclient->s = tclient->s; \
de832a55
EG
3035 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3036 } while (0)
3037
3038#define UPDATE_EXTEND_USTAT(s, t) \
3039 do { \
3040 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3041 old_uclient->s = uclient->s; \
3042 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
bb2a0f7a
YG
3043 } while (0)
3044
3045#define UPDATE_EXTEND_XSTAT(s, t) \
3046 do { \
4781bfad
EG
3047 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3048 old_xclient->s = xclient->s; \
de832a55
EG
3049 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3050 } while (0)
3051
3052/* minuend -= subtrahend */
3053#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3054 do { \
3055 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3056 } while (0)
3057
3058/* minuend[hi:lo] -= subtrahend */
3059#define SUB_EXTEND_64(m_hi, m_lo, s) \
3060 do { \
3061 SUB_64(m_hi, 0, m_lo, s); \
3062 } while (0)
3063
3064#define SUB_EXTEND_USTAT(s, t) \
3065 do { \
3066 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3067 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
a2fbb9ea
ET
3068 } while (0)
3069
3070/*
3071 * General service functions
3072 */
3073
3074static inline long bnx2x_hilo(u32 *hiref)
3075{
3076 u32 lo = *(hiref + 1);
3077#if (BITS_PER_LONG == 64)
3078 u32 hi = *hiref;
3079
3080 return HILO_U64(hi, lo);
3081#else
3082 return lo;
3083#endif
3084}
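/* On 32-bit builds only the low 32 bits are returned, so values above
 * 4G wrap in what bnx2x_hilo() reports; 64-bit builds return the full
 * HILO_U64 result. */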
3085
3086/*
3087 * Init service functions
3088 */
3089
bb2a0f7a
YG
3090static void bnx2x_storm_stats_post(struct bnx2x *bp)
3091{
3092 if (!bp->stats_pending) {
3093 struct eth_query_ramrod_data ramrod_data = {0};
de832a55 3094 int i, rc;
bb2a0f7a
YG
3095
3096 ramrod_data.drv_counter = bp->stats_counter++;
8d9c5f34 3097 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
de832a55
EG
3098 for_each_queue(bp, i)
3099 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
bb2a0f7a
YG
3100
3101 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3102 ((u32 *)&ramrod_data)[1],
3103 ((u32 *)&ramrod_data)[0], 0);
3104 if (rc == 0) {
 3105 /* stats ramrod has its own slot on the spq */
3106 bp->spq_left++;
3107 bp->stats_pending = 1;
3108 }
3109 }
3110}
3111
3112static void bnx2x_stats_init(struct bnx2x *bp)
3113{
3114 int port = BP_PORT(bp);
de832a55 3115 int i;
bb2a0f7a 3116
de832a55 3117 bp->stats_pending = 0;
bb2a0f7a
YG
3118 bp->executer_idx = 0;
3119 bp->stats_counter = 0;
3120
3121 /* port stats */
3122 if (!BP_NOMCP(bp))
3123 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3124 else
3125 bp->port.port_stx = 0;
3126 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3127
3128 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3129 bp->port.old_nig_stats.brb_discard =
3130 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
66e855f3
YG
3131 bp->port.old_nig_stats.brb_truncate =
3132 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
bb2a0f7a
YG
3133 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3134 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3135 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3136 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3137
3138 /* function stats */
de832a55
EG
3139 for_each_queue(bp, i) {
3140 struct bnx2x_fastpath *fp = &bp->fp[i];
3141
3142 memset(&fp->old_tclient, 0,
3143 sizeof(struct tstorm_per_client_stats));
3144 memset(&fp->old_uclient, 0,
3145 sizeof(struct ustorm_per_client_stats));
3146 memset(&fp->old_xclient, 0,
3147 sizeof(struct xstorm_per_client_stats));
3148 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3149 }
3150
bb2a0f7a 3151 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
bb2a0f7a
YG
3152 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3153
3154 bp->stats_state = STATS_STATE_DISABLED;
3155 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3156 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3157}
3158
3159static void bnx2x_hw_stats_post(struct bnx2x *bp)
3160{
3161 struct dmae_command *dmae = &bp->stats_dmae;
3162 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3163
3164 *stats_comp = DMAE_COMP_VAL;
de832a55
EG
3165 if (CHIP_REV_IS_SLOW(bp))
3166 return;
bb2a0f7a
YG
3167
3168 /* loader */
3169 if (bp->executer_idx) {
3170 int loader_idx = PMF_DMAE_C(bp);
3171
3172 memset(dmae, 0, sizeof(struct dmae_command));
3173
3174 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3175 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3176 DMAE_CMD_DST_RESET |
3177#ifdef __BIG_ENDIAN
3178 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3179#else
3180 DMAE_CMD_ENDIANITY_DW_SWAP |
3181#endif
3182 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3183 DMAE_CMD_PORT_0) |
3184 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3185 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3186 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3187 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3188 sizeof(struct dmae_command) *
3189 (loader_idx + 1)) >> 2;
3190 dmae->dst_addr_hi = 0;
3191 dmae->len = sizeof(struct dmae_command) >> 2;
3192 if (CHIP_IS_E1(bp))
3193 dmae->len--;
3194 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3195 dmae->comp_addr_hi = 0;
3196 dmae->comp_val = 1;
3197
3198 *stats_comp = 0;
3199 bnx2x_post_dmae(bp, dmae, loader_idx);
3200
3201 } else if (bp->func_stx) {
3202 *stats_comp = 0;
3203 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3204 }
3205}
3206
3207static int bnx2x_stats_comp(struct bnx2x *bp)
3208{
3209 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3210 int cnt = 10;
3211
3212 might_sleep();
3213 while (*stats_comp != DMAE_COMP_VAL) {
bb2a0f7a
YG
3214 if (!cnt) {
3215 BNX2X_ERR("timeout waiting for stats finished\n");
3216 break;
3217 }
3218 cnt--;
12469401 3219 msleep(1);
bb2a0f7a
YG
3220 }
3221 return 1;
3222}
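/* Polls the DMAE completion word for at most ~10ms (10 x msleep(1))
 * and returns 1 even on timeout - the failure is only logged - so
 * callers always proceed. */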
3223
3224/*
3225 * Statistics service functions
3226 */
3227
3228static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3229{
3230 struct dmae_command *dmae;
3231 u32 opcode;
3232 int loader_idx = PMF_DMAE_C(bp);
3233 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3234
3235 /* sanity */
3236 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3237 BNX2X_ERR("BUG!\n");
3238 return;
3239 }
3240
3241 bp->executer_idx = 0;
3242
3243 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3244 DMAE_CMD_C_ENABLE |
3245 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3246#ifdef __BIG_ENDIAN
3247 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3248#else
3249 DMAE_CMD_ENDIANITY_DW_SWAP |
3250#endif
3251 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3252 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3253
3254 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3255 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3256 dmae->src_addr_lo = bp->port.port_stx >> 2;
3257 dmae->src_addr_hi = 0;
3258 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3259 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3260 dmae->len = DMAE_LEN32_RD_MAX;
3261 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3262 dmae->comp_addr_hi = 0;
3263 dmae->comp_val = 1;
3264
3265 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3266 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3267 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3268 dmae->src_addr_hi = 0;
7a9b2557
VZ
3269 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3270 DMAE_LEN32_RD_MAX * 4);
3271 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3272 DMAE_LEN32_RD_MAX * 4);
bb2a0f7a
YG
3273 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3274 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3275 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3276 dmae->comp_val = DMAE_COMP_VAL;
3277
3278 *stats_comp = 0;
3279 bnx2x_hw_stats_post(bp);
3280 bnx2x_stats_comp(bp);
3281}
3282
3283static void bnx2x_port_stats_init(struct bnx2x *bp)
a2fbb9ea
ET
3284{
3285 struct dmae_command *dmae;
34f80b04 3286 int port = BP_PORT(bp);
bb2a0f7a 3287 int vn = BP_E1HVN(bp);
a2fbb9ea 3288 u32 opcode;
bb2a0f7a 3289 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3290 u32 mac_addr;
bb2a0f7a
YG
3291 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3292
3293 /* sanity */
3294 if (!bp->link_vars.link_up || !bp->port.pmf) {
3295 BNX2X_ERR("BUG!\n");
3296 return;
3297 }
a2fbb9ea
ET
3298
3299 bp->executer_idx = 0;
bb2a0f7a
YG
3300
3301 /* MCP */
3302 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3303 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3304 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3305#ifdef __BIG_ENDIAN
bb2a0f7a 3306 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3307#else
bb2a0f7a 3308 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3309#endif
bb2a0f7a
YG
3310 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3311 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3312
bb2a0f7a 3313 if (bp->port.port_stx) {
a2fbb9ea
ET
3314
3315 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3316 dmae->opcode = opcode;
bb2a0f7a
YG
3317 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3318 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3319 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3320 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3321 dmae->len = sizeof(struct host_port_stats) >> 2;
3322 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3323 dmae->comp_addr_hi = 0;
3324 dmae->comp_val = 1;
a2fbb9ea
ET
3325 }
3326
bb2a0f7a
YG
3327 if (bp->func_stx) {
3328
3329 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3330 dmae->opcode = opcode;
3331 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3332 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3333 dmae->dst_addr_lo = bp->func_stx >> 2;
3334 dmae->dst_addr_hi = 0;
3335 dmae->len = sizeof(struct host_func_stats) >> 2;
3336 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3337 dmae->comp_addr_hi = 0;
3338 dmae->comp_val = 1;
a2fbb9ea
ET
3339 }
3340
bb2a0f7a 3341 /* MAC */
a2fbb9ea
ET
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

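/* Note: in the chain built above, every command but the last completes
 * into the DMAE "go" register of the loader channel (comp_val 1), so the
 * whole sequence can be kicked off by a single loader command from
 * bnx2x_hw_stats_post(); only the last command writes DMAE_COMP_VAL to
 * the stats_comp word that bnx2x_stats_comp() polls on.
 */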
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

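/* Only the PMF (port management function) drives the port-wide
 * statistics; non-PMF functions maintain just their per-function block.
 */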
static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

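/* The UPDATE_STAT64() calls below accumulate the delta between the
 * freshly DMAEd MAC counters and the previous snapshot into 64-bit
 * hi/lo mirrors, so hardware counter wrap-around is absorbed.
 */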
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}

static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

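/* Fold the MAC (BMAC or EMAC) counters and the NIG discard counters
 * into the host port statistics block.
 */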
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;
	u32 nig_timer_max;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
	if (nig_timer_max != estats->nig_timer_max) {
		estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}

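/* Storm (firmware) statistics are taken only if each storm's counter
 * matches the last posted query; a mismatch means the firmware has not
 * finished updating the buffer yet, so the update is retried later.
 */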
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	memset(&(fstats->total_bytes_received_hi), 0,
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;

		/* are storm stats valid? */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   "  xstorm counter (%d) != stats_counter (%d)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   "  tstorm counter (%d) != stats_counter (%d)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   "  ustorm counter (%d) != stats_counter (%d)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		qstats->total_bytes_received_hi =
		qstats->valid_bytes_received_hi =
				le32_to_cpu(tclient->total_rcv_bytes.hi);
		qstats->total_bytes_received_lo =
		qstats->valid_bytes_received_lo =
				le32_to_cpu(tclient->total_rcv_bytes.lo);

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->total_sent_bytes.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->total_sent_bytes.lo);

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}

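/* Translate the accumulated driver statistics into the generic
 * struct net_device_stats counters reported to the stack.
 */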
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = estats->mac_discard;
	for_each_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	estats->driver_xoff = 0;
	estats->rx_err_discard_pkt = 0;
	estats->rx_skb_alloc_failed = 0;
	estats->hw_csum_err = 0;
	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;

		estats->driver_xoff += qstats->driver_xoff;
		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
		estats->hw_csum_err += qstats->hw_csum_err;
	}
}

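/* UPDATE event handler - runs from the periodic timer while the
 * statistics state machine is in the ENABLED state.
 */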
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
				  "  tx pkt (%lx)\n",
		       bnx2x_tx_avail(bp->fp),
		       le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
				  "  rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
			     bp->fp->rx_comp_cons),
		       le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
				  "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u  "
			"packets_too_big_discard %lu  no_buff_discard %lu  "
			"mac_discard %u  mac_filter_discard %u  "
			"xxovrflow_discard %u  brb_truncate_discard %u  "
			"ttl0_discard %u\n",
		       le32_to_cpu(old_tclient->checksum_discard),
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard,
		       estats->brb_truncate_discard,
		       le32_to_cpu(old_tclient->ttl0_discard));

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

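/* Flush the final port/function statistics to their shared memory
 * locations (port_stx/func_stx) before statistics are stopped.
 */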
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

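/* Statistics state machine: the current state and the incoming event
 * select both the action to execute and the next state.
 */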
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

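/* Periodic timer: drives the fastpath when running in poll mode and
 * maintains the driver<->MCP heartbeat pulse checked below.
 */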
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if ((bp->state == BNX2X_STATE_OPEN) ||
	    (bp->state == BNX2X_STATE_DISABLED))
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
			USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct ustorm_status_block)/4);
	bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
			CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct cstorm_status_block)/4);
}

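/* A non-default status block has a USTORM section (Rx indices) and a
 * CSTORM section (Tx indices); both point into the same host buffer.
 */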
static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
		USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
			USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct ustorm_def_status_block)/4);
	bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block)/4);
	bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}

static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						      reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
		USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

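/* The rx/tx_ticks values are divided by 12 to match the HC timeout
 * granularity; a zero result also sets the corresponding HC_DISABLE
 * flag for that status block index.
 */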
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     U_SB_ETH_RX_CQ_INDEX),
			 (bp->rx_ticks/12) ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     C_SB_ETH_TX_CQ_INDEX),
			 (bp->tx_ticks/12) ? 0 : 1);
	}
}

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}

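/* Each Rx queue uses three rings: the BD ring holding buffer
 * descriptors, the CQE ring for completions and the SGE ring used for
 * TPA aggregation; the last entries of every page chain to the next
 * page.
 */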
static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod, cqe_ring_prod;
	int i, j;

	bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
	DP(NETIF_MSG_IFUP,
	   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);

	if (bp->flags & TPA_ENABLE_FLAG) {

		for_each_rx_queue(bp, j) {
			struct bnx2x_fastpath *fp = &bp->fp[j];

			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				pci_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs on queue[%d]\n", i, j);
				fp->eth_q_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
				       cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}

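/* The last BD of each Tx ring page is a "next page" pointer, chaining
 * the pages into one circular ring.
 */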
static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_tx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_bd *tx_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1];

			tx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}

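/* Program the per-connection Ethernet context: the USTORM part gets
 * the Rx BD/SGE ring bases, the XSTORM part gets the Tx BD ring base
 * and the doorbell data address.
 */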
static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;
		u8 sb_id = fp->sb_id;

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
				 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
					 (u32)0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);
		}

		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.db_data_addr_hi =
						U64_HI(fp->tx_prods_mapping);
		context->xstorm_st_context.db_data_addr_lo =
						U64_LO(fp->tx_prods_mapping);
		context->xstorm_st_context.statistics_data = (cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = sb_id;

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}
}

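/* RSS indirection table: spread the hash buckets round-robin over the
 * Rx queues' client IDs.
 */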
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_rx_queues));
}

static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	if (bp->flags & TPA_ENABLE_FLAG) {
		tstorm_client.max_sges_for_packet =
			SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
		tstorm_client.max_sges_for_packet =
			((tstorm_client.max_sges_for_packet +
			  PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
			PAGES_PER_SGE_SHIFT;

		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
	}

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}

static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = (1 << BP_L_ID(bp));
	int func = BP_FUNC(bp);
	int i;

	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}

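/* Internal (storm) memory initialization is split by scope:
 * chip-common, per-port and per-function.
 */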
static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	if (bp->flags & TPA_ENABLE_FLAG) {
		struct tstorm_eth_tpa_exist tpa = {0};

		tpa.tpa_exist = 1;

		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
		       ((u32 *)&tpa)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
		       ((u32 *)&tpa)[1]);
	}

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int func = 2*vn + port;
		u32 vn_cfg =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero)
		bp->vn_weight_sum = 0;
}

static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i, j;
	u32 offset;
	u16 max_agg_size;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS(bp);
		tstorm_config.rss_result_mask = MULTI_MASK;
	}
	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bnx2x_set_storm_rx_mode(bp);

	for_each_queue(bp, i) {
		u8 cl_id = bp->fp[i].cl_id;

		/* reset xstorm per client statistics */
		offset = BAR_XSTRORM_INTMEM +
			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset tstorm per client statistics */
		offset = BAR_TSTRORM_INTMEM +
			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset ustorm per client statistics */
		offset = BAR_USTRORM_INTMEM +
			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5028 bp->e1hov);
34f80b04
EG
5029 }
5030
4f40f2cb
EG
5031 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5032 max_agg_size =
5033 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5034 SGE_PAGE_SIZE * PAGES_PER_SGE),
5035 (u32)0xffff);
555f6c78 5036 for_each_rx_queue(bp, i) {
7a9b2557 5037 struct bnx2x_fastpath *fp = &bp->fp[i];
7a9b2557
VZ
5038
5039 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5040 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
5041 U64_LO(fp->rx_comp_mapping));
5042 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5043 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
7a9b2557
VZ
5044 U64_HI(fp->rx_comp_mapping));
5045
7a9b2557 5046 REG_WR16(bp, BAR_USTRORM_INTMEM +
0626b899 5047 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
5048 max_agg_size);
5049 }
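/* Worked numbers for max_agg_size (editorial; the SGE_PAGE_SIZE and
 * PAGES_PER_SGE values below are assumptions, not taken from this file):
 * with MAX_SKB_FRAGS >= 8, SGE_PAGE_SIZE = 4096 and PAGES_PER_SGE = 2,
 * the inner term is 8 * 4096 * 2 = 65536, which exceeds the u16 range
 * and is therefore clipped to 0xffff before being written per client
 * above.
 */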
8a1c38d1 5050
1c06328c
EG
5051 /* dropless flow control */
5052 if (CHIP_IS_E1H(bp)) {
5053 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5054
5055 rx_pause.bd_thr_low = 250;
5056 rx_pause.cqe_thr_low = 250;
5057 rx_pause.cos = 1;
5058 rx_pause.sge_thr_low = 0;
5059 rx_pause.bd_thr_high = 350;
5060 rx_pause.cqe_thr_high = 350;
5061 rx_pause.sge_thr_high = 0;
5062
5063 for_each_rx_queue(bp, i) {
5064 struct bnx2x_fastpath *fp = &bp->fp[i];
5065
5066 if (!fp->disable_tpa) {
5067 rx_pause.sge_thr_low = 150;
5068 rx_pause.sge_thr_high = 250;
5069 }
5070
5071
5072 offset = BAR_USTRORM_INTMEM +
5073 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5074 fp->cl_id);
5075 for (j = 0;
5076 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5077 j++)
5078 REG_WR(bp, offset + j*4,
5079 ((u32 *)&rx_pause)[j]);
5080 }
5081 }
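/* Editorial note: each low/high pair above appears to form a hysteresis
 * band for dropless flow control -- pause is requested when free BDs or
 * CQEs fall below the 250-entry low threshold and released again above
 * the 350-entry high threshold; the SGE thresholds are only raised on
 * queues where TPA is active.
 */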
5082
8a1c38d1
EG
5083 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5084
5085 /* Init rate shaping and fairness contexts */
5086 if (IS_E1HMF(bp)) {
5087 int vn;
5088
5089 /* During init there is no active link
5090 Until link is up, set link rate to 10Gbps */
5091 bp->link_vars.line_speed = SPEED_10000;
5092 bnx2x_init_port_minmax(bp);
5093
5094 bnx2x_calc_vn_weight_sum(bp);
5095
5096 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5097 bnx2x_init_vn_minmax(bp, 2*vn + port);
5098
5099 /* Enable rate shaping and fairness */
5100 bp->cmng.flags.cmng_enables =
5101 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5102 if (bp->vn_weight_sum)
5103 bp->cmng.flags.cmng_enables |=
5104 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5105 else
5106 DP(NETIF_MSG_IFUP, "All MIN values are zeroes;"
5107 " fairness will be disabled\n");
5108 } else {
5109 /* rate shaping and fairness are disabled */
5110 DP(NETIF_MSG_IFUP,
5111 "single function mode minmax will be disabled\n");
5112 }
5113
5114
5115 /* Store it to internal memory */
5116 if (bp->port.pmf)
5117 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5118 REG_WR(bp, BAR_XSTRORM_INTMEM +
5119 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5120 ((u32 *)(&bp->cmng))[i]);
a2fbb9ea
ET
5121}
5122
471de716
EG
5123static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5124{
5125 switch (load_code) {
5126 case FW_MSG_CODE_DRV_LOAD_COMMON:
5127 bnx2x_init_internal_common(bp);
5128 /* no break */
5129
5130 case FW_MSG_CODE_DRV_LOAD_PORT:
5131 bnx2x_init_internal_port(bp);
5132 /* no break */
5133
5134 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5135 bnx2x_init_internal_func(bp);
5136 break;
5137
5138 default:
5139 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5140 break;
5141 }
5142}
5143
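/* Design note (editorial): the deliberate fall-through above means a
 * COMMON load also performs the PORT and FUNCTION init, and a PORT load
 * also performs the FUNCTION init; the MCP grants the wider load_code
 * only to the first driver instance coming up on the chip or port.
 */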
5144static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
5145{
5146 int i;
5147
5148 for_each_queue(bp, i) {
5149 struct bnx2x_fastpath *fp = &bp->fp[i];
5150
34f80b04 5151 fp->bp = bp;
a2fbb9ea 5152 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 5153 fp->index = i;
34f80b04
EG
5154 fp->cl_id = BP_L_ID(bp) + i;
5155 fp->sb_id = fp->cl_id;
5156 DP(NETIF_MSG_IFUP,
f5372251
EG
5157 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5158 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5c862848 5159 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
0626b899 5160 fp->sb_id);
5c862848 5161 bnx2x_update_fpsb_idx(fp);
a2fbb9ea
ET
5162 }
5163
16119785
EG
5164 /* ensure status block indices were read */
5165 rmb();
5166
5167
5c862848
EG
5168 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5169 DEF_SB_ID);
5170 bnx2x_update_dsb_idx(bp);
a2fbb9ea
ET
5171 bnx2x_update_coalesce(bp);
5172 bnx2x_init_rx_rings(bp);
5173 bnx2x_init_tx_ring(bp);
5174 bnx2x_init_sp_ring(bp);
5175 bnx2x_init_context(bp);
471de716 5176 bnx2x_init_internal(bp, load_code);
a2fbb9ea 5177 bnx2x_init_ind_table(bp);
0ef00459
EG
5178 bnx2x_stats_init(bp);
5179
5180 /* At this point, we are ready for interrupts */
5181 atomic_set(&bp->intr_sem, 0);
5182
5183 /* flush all before enabling interrupts */
5184 mb();
5185 mmiowb();
5186
615f8fd9 5187 bnx2x_int_enable(bp);
a2fbb9ea
ET
5188}
5189
5190/* end of nic init */
5191
5192/*
5193 * gzip service functions
5194 */
5195
5196static int bnx2x_gunzip_init(struct bnx2x *bp)
5197{
5198 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5199 &bp->gunzip_mapping);
5200 if (bp->gunzip_buf == NULL)
5201 goto gunzip_nomem1;
5202
5203 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5204 if (bp->strm == NULL)
5205 goto gunzip_nomem2;
5206
5207 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5208 GFP_KERNEL);
5209 if (bp->strm->workspace == NULL)
5210 goto gunzip_nomem3;
5211
5212 return 0;
5213
5214gunzip_nomem3:
5215 kfree(bp->strm);
5216 bp->strm = NULL;
5217
5218gunzip_nomem2:
5219 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5220 bp->gunzip_mapping);
5221 bp->gunzip_buf = NULL;
5222
5223gunzip_nomem1:
5224 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 5225 " decompression\n", bp->dev->name);
a2fbb9ea
ET
5226 return -ENOMEM;
5227}
5228
5229static void bnx2x_gunzip_end(struct bnx2x *bp)
5230{
5231 kfree(bp->strm->workspace);
5232
5233 kfree(bp->strm);
5234 bp->strm = NULL;
5235
5236 if (bp->gunzip_buf) {
5237 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5238 bp->gunzip_mapping);
5239 bp->gunzip_buf = NULL;
5240 }
5241}
5242
94a78b79 5243static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
a2fbb9ea
ET
5244{
5245 int n, rc;
5246
5247 /* check gzip header */
94a78b79
VZ
5248 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5249 BNX2X_ERR("Bad gzip header\n");
a2fbb9ea 5250 return -EINVAL;
94a78b79 5251 }
a2fbb9ea
ET
5252
5253 n = 10;
5254
34f80b04 5255#define FNAME 0x8
a2fbb9ea
ET
5256
5257 if (zbuf[3] & FNAME)
5258 while ((zbuf[n++] != 0) && (n < len));
5259
94a78b79 5260 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
a2fbb9ea
ET
5261 bp->strm->avail_in = len - n;
5262 bp->strm->next_out = bp->gunzip_buf;
5263 bp->strm->avail_out = FW_BUF_SIZE;
5264
5265 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5266 if (rc != Z_OK)
5267 return rc;
5268
5269 rc = zlib_inflate(bp->strm, Z_FINISH);
5270 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5271 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5272 bp->dev->name, bp->strm->msg);
5273
5274 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5275 if (bp->gunzip_outlen & 0x3)
5276 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5277 " gunzip_outlen (%d) not aligned\n",
5278 bp->dev->name, bp->gunzip_outlen);
5279 bp->gunzip_outlen >>= 2;
5280
5281 zlib_inflateEnd(bp->strm);
5282
5283 if (rc == Z_STREAM_END)
5284 return 0;
5285
5286 return rc;
5287}
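/* Editorial sketch of the fixed gzip member header (per RFC 1952) that
 * the check above validates; the struct name is hypothetical.  Byte 3
 * is the FLG field: when FLG.FNAME (0x08) is set, a NUL-terminated file
 * name follows the 10 fixed bytes and must be skipped -- hence n = 10
 * and the FNAME loop -- before handing the raw deflate stream to
 * zlib_inflate() with -MAX_WBITS (no zlib wrapper).
 */
struct gzip_member_hdr {
	u8 id1;			/* 0x1f */
	u8 id2;			/* 0x8b */
	u8 cm;			/* Z_DEFLATED (8) */
	u8 flg;			/* bit 3: FNAME */
	u8 mtime[4];
	u8 xfl;
	u8 os;
} __attribute__((packed));	/* 10 bytes */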
5288
5289/* nic load/unload */
5290
5291/*
34f80b04 5292 * General service functions
a2fbb9ea
ET
5293 */
5294
5295/* send a NIG loopback debug packet */
5296static void bnx2x_lb_pckt(struct bnx2x *bp)
5297{
a2fbb9ea 5298 u32 wb_write[3];
a2fbb9ea
ET
5299
5300 /* Ethernet source and destination addresses */
a2fbb9ea
ET
5301 wb_write[0] = 0x55555555;
5302 wb_write[1] = 0x55555555;
34f80b04 5303 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 5304 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
5305
5306 /* NON-IP protocol */
a2fbb9ea
ET
5307 wb_write[0] = 0x09000000;
5308 wb_write[1] = 0x55555555;
34f80b04 5309 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 5310 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
5311}
5312
5313/* some of the internal memories
5314 * are not directly readable from the driver;
5315 * to test them we send debug packets
5316 */
5317static int bnx2x_int_mem_test(struct bnx2x *bp)
5318{
5319 int factor;
5320 int count, i;
5321 u32 val = 0;
5322
ad8d3948 5323 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 5324 factor = 120;
ad8d3948
EG
5325 else if (CHIP_REV_IS_EMUL(bp))
5326 factor = 200;
5327 else
a2fbb9ea 5328 factor = 1;
a2fbb9ea
ET
5329
5330 DP(NETIF_MSG_HW, "start part1\n");
5331
5332 /* Disable inputs of parser neighbor blocks */
5333 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5334 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5335 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5336 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5337
5338 /* Write 0 to parser credits for CFC search request */
5339 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5340
5341 /* send Ethernet packet */
5342 bnx2x_lb_pckt(bp);
5343
5344 /* TODO: should we reset the NIG statistics here? */
5345 /* Wait until NIG register shows 1 packet of size 0x10 */
5346 count = 1000 * factor;
5347 while (count) {
34f80b04 5348
a2fbb9ea
ET
5349 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5350 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5351 if (val == 0x10)
5352 break;
5353
5354 msleep(10);
5355 count--;
5356 }
5357 if (val != 0x10) {
5358 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5359 return -1;
5360 }
5361
5362 /* Wait until PRS register shows 1 packet */
5363 count = 1000 * factor;
5364 while (count) {
5365 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
5366 if (val == 1)
5367 break;
5368
5369 msleep(10);
5370 count--;
5371 }
5372 if (val != 0x1) {
5373 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5374 return -2;
5375 }
5376
5377 /* Reset and init BRB, PRS */
34f80b04 5378 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 5379 msleep(50);
34f80b04 5380 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea 5381 msleep(50);
94a78b79
VZ
5382 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5383 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
a2fbb9ea
ET
5384
5385 DP(NETIF_MSG_HW, "part2\n");
5386
5387 /* Disable inputs of parser neighbor blocks */
5388 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5389 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5390 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5391 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5392
5393 /* Write 0 to parser credits for CFC search request */
5394 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5395
5396 /* send 10 Ethernet packets */
5397 for (i = 0; i < 10; i++)
5398 bnx2x_lb_pckt(bp);
5399
5400 /* Wait until NIG register shows 10 + 1
5401 packets with a total size of 11*0x10 = 0xb0 */
5402 count = 1000 * factor;
5403 while (count) {
34f80b04 5404
a2fbb9ea
ET
5405 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5406 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5407 if (val == 0xb0)
5408 break;
5409
5410 msleep(10);
5411 count--;
5412 }
5413 if (val != 0xb0) {
5414 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5415 return -3;
5416 }
5417
5418 /* Wait until PRS register shows 2 packets */
5419 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5420 if (val != 2)
5421 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5422
5423 /* Write 1 to parser credits for CFC search request */
5424 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5425
5426 /* Wait until PRS register shows 3 packets */
5427 msleep(10 * factor);
5428 /* Re-read the PRS packet counter */
5429 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5430 if (val != 3)
5431 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5432
5433 /* clear NIG EOP FIFO */
5434 for (i = 0; i < 11; i++)
5435 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5436 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5437 if (val != 1) {
5438 BNX2X_ERR("clear of NIG failed\n");
5439 return -4;
5440 }
5441
5442 /* Reset and init BRB, PRS, NIG */
5443 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5444 msleep(50);
5445 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5446 msleep(50);
94a78b79
VZ
5447 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5448 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
a2fbb9ea
ET
5449#ifndef BCM_ISCSI
5450 /* set NIC mode */
5451 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5452#endif
5453
5454 /* Enable inputs of parser neighbor blocks */
5455 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5456 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5457 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5458 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea
ET
5459
5460 DP(NETIF_MSG_HW, "done\n");
5461
5462 return 0; /* OK */
5463}
5464
5465static void enable_blocks_attention(struct bnx2x *bp)
5466{
5467 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5468 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5469 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5470 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5471 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5472 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5473 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5474 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5475 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
5476/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5477/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5478 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5479 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5480 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
5481/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5482/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5483 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5484 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5485 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5486 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
5487/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5488/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5489 if (CHIP_REV_IS_FPGA(bp))
5490 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5491 else
5492 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea
ET
5493 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5494 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5495 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04
EG
5496/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5497/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5498 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5499 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04
EG
5500/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5501 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bits 3,4 masked */
a2fbb9ea
ET
5502}
5503
34f80b04 5504
81f75bbf
EG
5505static void bnx2x_reset_common(struct bnx2x *bp)
5506{
5507 /* reset_common */
5508 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5509 0xd3ffff7f);
5510 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5511}
5512
34f80b04 5513static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5514{
a2fbb9ea 5515 u32 val, i;
a2fbb9ea 5516
34f80b04 5517 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5518
81f75bbf 5519 bnx2x_reset_common(bp);
34f80b04
EG
5520 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5521 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5522
94a78b79 5523 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
34f80b04
EG
5524 if (CHIP_IS_E1H(bp))
5525 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5526
34f80b04
EG
5527 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5528 msleep(30);
5529 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5530
94a78b79 5531 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
34f80b04
EG
5532 if (CHIP_IS_E1(bp)) {
5533 /* enable HW interrupt from PXP on USDM overflow
5534 bit 16 on INT_MASK_0 */
5535 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5536 }
a2fbb9ea 5537
94a78b79 5538 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
34f80b04 5539 bnx2x_init_pxp(bp);
a2fbb9ea
ET
5540
5541#ifdef __BIG_ENDIAN
34f80b04
EG
5542 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5543 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5544 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5545 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5546 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
5547 /* make sure this value is 0 */
5548 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
5549
5550/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5551 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5552 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5553 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5554 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
5555#endif
5556
34f80b04 5557 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 5558#ifdef BCM_ISCSI
34f80b04
EG
5559 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5560 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5561 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
a2fbb9ea
ET
5562#endif
5563
34f80b04
EG
5564 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5565 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5566
34f80b04
EG
5567 /* let the HW do its magic ... */
5568 msleep(100);
5569 /* finish PXP init */
5570 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5571 if (val != 1) {
5572 BNX2X_ERR("PXP2 CFG failed\n");
5573 return -EBUSY;
5574 }
5575 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5576 if (val != 1) {
5577 BNX2X_ERR("PXP2 RD_INIT failed\n");
5578 return -EBUSY;
5579 }
a2fbb9ea 5580
34f80b04
EG
5581 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5582 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5583
94a78b79 5584 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
a2fbb9ea 5585
34f80b04
EG
5586 /* clean the DMAE memory */
5587 bp->dmae_ready = 1;
5588 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5589
94a78b79
VZ
5590 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5591 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5592 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5593 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
a2fbb9ea 5594
34f80b04
EG
5595 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5596 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5597 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5598 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5599
94a78b79 5600 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
34f80b04
EG
5601 /* soft reset pulse */
5602 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5603 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea
ET
5604
5605#ifdef BCM_ISCSI
94a78b79 5606 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
a2fbb9ea 5607#endif
a2fbb9ea 5608
94a78b79 5609 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
34f80b04
EG
5610 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5611 if (!CHIP_REV_IS_SLOW(bp)) {
5612 /* enable hw interrupt from doorbell Q */
5613 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5614 }
a2fbb9ea 5615
94a78b79
VZ
5616 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5617 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
26c8fa4d 5618 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
3196a88a
EG
5619 /* set NIC mode */
5620 REG_WR(bp, PRS_REG_NIC_MODE, 1);
34f80b04
EG
5621 if (CHIP_IS_E1H(bp))
5622 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 5623
94a78b79
VZ
5624 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5625 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5626 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5627 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
a2fbb9ea 5628
490c3c9b
EG
5629 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5630 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5631 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5632 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
a2fbb9ea 5633
94a78b79
VZ
5634 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5635 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5636 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5637 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
a2fbb9ea 5638
34f80b04
EG
5639 /* sync semi rtc */
5640 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5641 0x80000000);
5642 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5643 0x80000000);
a2fbb9ea 5644
94a78b79
VZ
5645 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5646 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5647 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
a2fbb9ea 5648
34f80b04
EG
5649 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5650 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5651 REG_WR(bp, i, 0xc0cac01a);
5652 /* TODO: replace with something meaningful */
5653 }
94a78b79 5654 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
34f80b04 5655 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5656
34f80b04
EG
5657 if (sizeof(union cdu_context) != 1024)
5658 /* we currently assume that a context is 1024 bytes */
5659 printk(KERN_ALERT PFX "please adjust the size of"
5660 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 5661
94a78b79 5662 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
34f80b04
EG
5663 val = (4 << 24) + (0 << 12) + 1024;
5664 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5665 if (CHIP_IS_E1(bp)) {
5666 /* !!! fix pxp client credit until excel update */
5667 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5668 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5669 }
a2fbb9ea 5670
94a78b79 5671 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
34f80b04 5672 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
8d9c5f34
EG
5673 /* enable context validation interrupt from CFC */
5674 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5675
5676 /* set the thresholds to prevent CFC/CDU race */
5677 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 5678
94a78b79
VZ
5679 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
5680 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
a2fbb9ea 5681
34f80b04 5682 /* PXPCS COMMON comes here */
94a78b79 5683 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
34f80b04
EG
5684 /* Reset PCIE errors for debug */
5685 REG_WR(bp, 0x2814, 0xffffffff);
5686 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5687
34f80b04 5688 /* EMAC0 COMMON comes here */
94a78b79 5689 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
34f80b04 5690 /* EMAC1 COMMON comes here */
94a78b79 5691 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
34f80b04 5692 /* DBU COMMON comes here */
94a78b79 5693 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
34f80b04 5694 /* DBG COMMON comes here */
94a78b79 5695 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
34f80b04 5696
94a78b79 5697 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
34f80b04
EG
5698 if (CHIP_IS_E1H(bp)) {
5699 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5700 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5701 }
5702
5703 if (CHIP_REV_IS_SLOW(bp))
5704 msleep(200);
5705
5706 /* finish CFC init */
5707 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5708 if (val != 1) {
5709 BNX2X_ERR("CFC LL_INIT failed\n");
5710 return -EBUSY;
5711 }
5712 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5713 if (val != 1) {
5714 BNX2X_ERR("CFC AC_INIT failed\n");
5715 return -EBUSY;
5716 }
5717 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5718 if (val != 1) {
5719 BNX2X_ERR("CFC CAM_INIT failed\n");
5720 return -EBUSY;
5721 }
5722 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5723
34f80b04
EG
5724 /* read NIG statistic
5725 to see if this is our first bring-up since power-up */
5726 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5727 val = *bnx2x_sp(bp, wb_data[0]);
5728
5729 /* do internal memory self test */
5730 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5731 BNX2X_ERR("internal mem self test failed\n");
5732 return -EBUSY;
5733 }
5734
35b19ba5 5735 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
46c6a674
EG
5736 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5737 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5738 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5739 bp->port.need_hw_lock = 1;
5740 break;
5741
35b19ba5 5742 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
34f80b04
EG
5743 /* Fan failure is indicated by SPIO 5 */
5744 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5745 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5746
5747 /* set to active low mode */
5748 val = REG_RD(bp, MISC_REG_SPIO_INT);
5749 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
f1410647 5750 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
34f80b04 5751 REG_WR(bp, MISC_REG_SPIO_INT, val);
f1410647 5752
34f80b04
EG
5753 /* enable interrupt to signal the IGU */
5754 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5755 val |= (1 << MISC_REGISTERS_SPIO_5);
5756 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5757 break;
f1410647 5758
34f80b04
EG
5759 default:
5760 break;
5761 }
f1410647 5762
34f80b04
EG
5763 /* clear PXP2 attentions */
5764 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5765
34f80b04 5766 enable_blocks_attention(bp);
a2fbb9ea 5767
6bbca910
YR
5768 if (!BP_NOMCP(bp)) {
5769 bnx2x_acquire_phy_lock(bp);
5770 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5771 bnx2x_release_phy_lock(bp);
5772 } else
5773 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5774
34f80b04
EG
5775 return 0;
5776}
a2fbb9ea 5777
34f80b04
EG
5778static int bnx2x_init_port(struct bnx2x *bp)
5779{
5780 int port = BP_PORT(bp);
94a78b79 5781 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
1c06328c 5782 u32 low, high;
34f80b04 5783 u32 val;
a2fbb9ea 5784
34f80b04
EG
5785 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5786
5787 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea
ET
5788
5789 /* Port PXP comes here */
94a78b79 5790 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
a2fbb9ea 5791 /* Port PXP2 comes here */
94a78b79 5792 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
a2fbb9ea
ET
5793#ifdef BCM_ISCSI
5794 /* Port0 1
5795 * Port1 385 */
5796 i++;
5797 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5798 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5799 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5800 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5801
5802 /* Port0 2
5803 * Port1 386 */
5804 i++;
5805 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5806 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5807 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5808 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5809
5810 /* Port0 3
5811 * Port1 387 */
5812 i++;
5813 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5814 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5815 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5816 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5817#endif
34f80b04 5818 /* Port CMs come here */
94a78b79 5819 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
a2fbb9ea
ET
5820
5821 /* Port QM comes here */
a2fbb9ea
ET
5822#ifdef BCM_ISCSI
5823 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5824 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5825
94a78b79 5826 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
a2fbb9ea
ET
5827#endif
5828 /* Port DQ comes here */
94a78b79 5829 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
1c06328c 5830
94a78b79 5831 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
1c06328c
EG
5832 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5833 /* no pause for emulation and FPGA */
5834 low = 0;
5835 high = 513;
5836 } else {
5837 if (IS_E1HMF(bp))
5838 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5839 else if (bp->dev->mtu > 4096) {
5840 if (bp->flags & ONE_PORT_FLAG)
5841 low = 160;
5842 else {
5843 val = bp->dev->mtu;
5844 /* (24*1024 + val*4)/256 */
5845 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5846 }
5847 } else
5848 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5849 high = low + 56; /* 14*1024/256 */
5850 }
5851 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5852 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5853
5854
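/* Worked example (editorial): on a two-port device in single-function
 * mode with mtu = 9000, the branch above gives
 *	low  = 96 + 9000/64 + 1 = 237	(the ceiling of
 *					 (24*1024 + 9000*4)/256)
 *	high = 237 + 56 = 293		(56 = 14*1024/256)
 * i.e. the thresholds are expressed in 256-byte BRB blocks.
 */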
ad8d3948 5855 /* Port PRS comes here */
94a78b79 5856 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
a2fbb9ea 5857 /* Port TSDM comes here */
94a78b79 5858 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
a2fbb9ea 5859 /* Port CSDM comes here */
94a78b79 5860 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
a2fbb9ea 5861 /* Port USDM comes here */
94a78b79 5862 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
a2fbb9ea 5863 /* Port XSDM comes here */
94a78b79 5864 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
356e2385 5865
94a78b79
VZ
5866 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
5867 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
5868 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
5869 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
356e2385 5870
a2fbb9ea 5871 /* Port UPB comes here */
94a78b79 5872 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
34f80b04 5873 /* Port XPB comes here */
94a78b79 5874 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
34f80b04 5875
94a78b79 5876 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
a2fbb9ea
ET
5877
5878 /* configure PBF to work without PAUSE for an MTU of 9000 */
34f80b04 5879 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea
ET
5880
5881 /* update threshold */
34f80b04 5882 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 5883 /* update init credit */
34f80b04 5884 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea
ET
5885
5886 /* probe changes */
34f80b04 5887 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 5888 msleep(5);
34f80b04 5889 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
a2fbb9ea
ET
5890
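/* Worked numbers (editorial): the threshold written above is
 * 9040/16 = 565 and the initial credit 565 + 553 - 22 = 1096, both
 * apparently in 16-byte units sized for the 9000-byte no-PAUSE MTU
 * plus header room; the 553 and 22 constants are not explained here.
 */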
5891#ifdef BCM_ISCSI
5892 /* tell the searcher where the T2 table is */
5893 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5894
5895 wb_write[0] = U64_LO(bp->t2_mapping);
5896 wb_write[1] = U64_HI(bp->t2_mapping);
5897 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5898 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5899 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5900 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5901
5902 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5903 /* Port SRCH comes here */
5904#endif
5905 /* Port CDU comes here */
94a78b79 5906 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
a2fbb9ea 5907 /* Port CFC comes here */
94a78b79 5908 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
34f80b04
EG
5909
5910 if (CHIP_IS_E1(bp)) {
5911 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5912 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5913 }
94a78b79 5914 bnx2x_init_block(bp, HC_BLOCK, init_stage);
34f80b04 5915
94a78b79 5916 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
34f80b04
EG
5917 /* init aeu_mask_attn_func_0/1:
5918 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5919 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5920 * bits 4-7 are used for "per vn group attention" */
5921 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5922 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5923
a2fbb9ea 5924 /* Port PXPCS comes here */
94a78b79 5925 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
a2fbb9ea 5926 /* Port EMAC0 comes here */
94a78b79 5927 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
a2fbb9ea 5928 /* Port EMAC1 comes here */
94a78b79 5929 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
a2fbb9ea 5930 /* Port DBU comes here */
94a78b79 5931 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
a2fbb9ea 5932 /* Port DBG comes here */
94a78b79 5933 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
356e2385 5934
94a78b79 5935 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
34f80b04
EG
5936
5937 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5938
5939 if (CHIP_IS_E1H(bp)) {
34f80b04
EG
5940 /* 0x2 disable e1hov, 0x1 enable */
5941 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5942 (IS_E1HMF(bp) ? 0x1 : 0x2));
5943
1c06328c
EG
5944 /* support pause requests from USDM, TSDM and BRB */
5945 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
5946
5947 {
5948 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5949 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5950 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5951 }
34f80b04
EG
5952 }
5953
a2fbb9ea 5954 /* Port MCP comes here */
94a78b79 5955 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
a2fbb9ea 5956 /* Port DMAE comes here */
94a78b79 5957 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
a2fbb9ea 5958
35b19ba5 5959 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
589abe3a
EG
5960 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5961 {
5962 u32 swap_val, swap_override, aeu_gpio_mask, offset;
5963
5964 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
5965 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
5966
5967 /* The GPIO should be swapped if the swap register is
5968 set and active */
5969 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5970 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5971
5972 /* Select function upon port-swap configuration */
5973 if (port == 0) {
5974 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
5975 aeu_gpio_mask = (swap_val && swap_override) ?
5976 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
5977 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
5978 } else {
5979 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
5980 aeu_gpio_mask = (swap_val && swap_override) ?
5981 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
5982 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
5983 }
5984 val = REG_RD(bp, offset);
5985 /* add GPIO3 to group */
5986 val |= aeu_gpio_mask;
5987 REG_WR(bp, offset, val);
5988 }
5989 break;
5990
35b19ba5 5991 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
f1410647
ET
5992 /* add SPIO 5 to group 0 */
5993 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5994 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5995 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5996 break;
5997
5998 default:
5999 break;
6000 }
6001
c18487ee 6002 bnx2x__link_reset(bp);
a2fbb9ea 6003
34f80b04
EG
6004 return 0;
6005}
6006
6007#define ILT_PER_FUNC (768/2)
6008#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6009/* the phys address is shifted right 12 bits and has a
6010 1=valid bit added as the 53rd bit;
6011 since this is a wide register(TM)
6012 we split it into two 32 bit writes
6013 */
6014#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6015#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6016#define PXP_ONE_ILT(x) (((x) << 10) | (x))
6017#define PXP_ILT_RANGE(f, l) (((l) << 10) | (f))
6018
6019#define CNIC_ILT_LINES 0
6020
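/* Worked example (editorial, hypothetical address): with
 * dma_addr_t addr = 0x123456789000ULL,
 *	ONCHIP_ADDR1(addr) = (addr >> 12) & 0xFFFFFFFF = 0x23456789
 *	ONCHIP_ADDR2(addr) = (1 << 20) | (addr >> 44) = 0x00100001
 * so the low write carries bits 12..43 of the physical address and the
 * high write carries the remaining top bits plus the valid bit at
 * position 20, matching the two 32-bit halves of the wide register.
 */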
6021static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6022{
6023 int reg;
6024
6025 if (CHIP_IS_E1H(bp))
6026 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6027 else /* E1 */
6028 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6029
6030 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6031}
6032
6033static int bnx2x_init_func(struct bnx2x *bp)
6034{
6035 int port = BP_PORT(bp);
6036 int func = BP_FUNC(bp);
8badd27a 6037 u32 addr, val;
34f80b04
EG
6038 int i;
6039
6040 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6041
8badd27a
EG
6042 /* set MSI reconfigure capability */
6043 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6044 val = REG_RD(bp, addr);
6045 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6046 REG_WR(bp, addr, val);
6047
34f80b04
EG
6048 i = FUNC_ILT_BASE(func);
6049
6050 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6051 if (CHIP_IS_E1H(bp)) {
6052 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6053 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6054 } else /* E1 */
6055 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6056 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6057
6058
6059 if (CHIP_IS_E1H(bp)) {
6060 for (i = 0; i < 9; i++)
6061 bnx2x_init_block(bp,
94a78b79 6062 cm_blocks[i], FUNC0_STAGE + func);
34f80b04
EG
6063
6064 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6065 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6066 }
6067
6068 /* HC init per function */
6069 if (CHIP_IS_E1H(bp)) {
6070 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6071
6072 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6073 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6074 }
94a78b79 6075 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
34f80b04 6076
c14423fe 6077 /* Reset PCIE errors for debug */
a2fbb9ea
ET
6078 REG_WR(bp, 0x2114, 0xffffffff);
6079 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 6080
34f80b04
EG
6081 return 0;
6082}
6083
6084static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6085{
6086 int i, rc = 0;
a2fbb9ea 6087
34f80b04
EG
6088 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6089 BP_FUNC(bp), load_code);
a2fbb9ea 6090
34f80b04
EG
6091 bp->dmae_ready = 0;
6092 mutex_init(&bp->dmae_mutex);
6093 bnx2x_gunzip_init(bp);
a2fbb9ea 6094
34f80b04
EG
6095 switch (load_code) {
6096 case FW_MSG_CODE_DRV_LOAD_COMMON:
6097 rc = bnx2x_init_common(bp);
6098 if (rc)
6099 goto init_hw_err;
6100 /* no break */
6101
6102 case FW_MSG_CODE_DRV_LOAD_PORT:
6103 bp->dmae_ready = 1;
6104 rc = bnx2x_init_port(bp);
6105 if (rc)
6106 goto init_hw_err;
6107 /* no break */
6108
6109 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6110 bp->dmae_ready = 1;
6111 rc = bnx2x_init_func(bp);
6112 if (rc)
6113 goto init_hw_err;
6114 break;
6115
6116 default:
6117 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6118 break;
6119 }
6120
6121 if (!BP_NOMCP(bp)) {
6122 int func = BP_FUNC(bp);
a2fbb9ea
ET
6123
6124 bp->fw_drv_pulse_wr_seq =
34f80b04 6125 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 6126 DRV_PULSE_SEQ_MASK);
34f80b04
EG
6127 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6128 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
6129 bp->fw_drv_pulse_wr_seq, bp->func_stx);
6130 } else
6131 bp->func_stx = 0;
a2fbb9ea 6132
34f80b04
EG
6133 /* this needs to be done before gunzip end */
6134 bnx2x_zero_def_sb(bp);
6135 for_each_queue(bp, i)
6136 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6137
6138init_hw_err:
6139 bnx2x_gunzip_end(bp);
6140
6141 return rc;
a2fbb9ea
ET
6142}
6143
c14423fe 6144/* send the MCP a request, block until there is a reply */
a2fbb9ea
ET
6145static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6146{
34f80b04 6147 int func = BP_FUNC(bp);
f1410647
ET
6148 u32 seq = ++bp->fw_seq;
6149 u32 rc = 0;
19680c48
EG
6150 u32 cnt = 1;
6151 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
a2fbb9ea 6152
34f80b04 6153 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
f1410647 6154 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea 6155
19680c48
EG
6156 do {
6157 /* let the FW do its magic ... */
6158 msleep(delay);
a2fbb9ea 6159
19680c48 6160 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
a2fbb9ea 6161
19680c48
EG
6162 /* Give the FW up to 2 seconds (200 * 10ms) */
6163 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6164
6165 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6166 cnt*delay, rc, seq);
a2fbb9ea
ET
6167
6168 /* is this a reply to our command? */
6169 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6170 rc &= FW_MSG_CODE_MASK;
f1410647 6171
a2fbb9ea
ET
6172 } else {
6173 /* FW BUG! */
6174 BNX2X_ERR("FW failed to respond!\n");
6175 bnx2x_fw_dump(bp);
6176 rc = 0;
6177 }
f1410647 6178
a2fbb9ea
ET
6179 return rc;
6180}
6181
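/* Usage sketch (editorial): callers post a DRV_MSG_CODE_* request and
 * treat a zero return as a dead bootcode, e.g.
 *	load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
 *	if (!load_code) {
 *		BNX2X_ERR("MCP response failure, aborting\n");
 *		return -EBUSY;
 *	}
 * The sequence number in fw_seq pairs each reply with its request.
 */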
6182static void bnx2x_free_mem(struct bnx2x *bp)
6183{
6184
6185#define BNX2X_PCI_FREE(x, y, size) \
6186 do { \
6187 if (x) { \
6188 pci_free_consistent(bp->pdev, size, x, y); \
6189 x = NULL; \
6190 y = 0; \
6191 } \
6192 } while (0)
6193
6194#define BNX2X_FREE(x) \
6195 do { \
6196 if (x) { \
6197 vfree(x); \
6198 x = NULL; \
6199 } \
6200 } while (0)
6201
6202 int i;
6203
6204 /* fastpath */
555f6c78 6205 /* Common */
a2fbb9ea
ET
6206 for_each_queue(bp, i) {
6207
555f6c78 6208 /* status blocks */
a2fbb9ea
ET
6209 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6210 bnx2x_fp(bp, i, status_blk_mapping),
6211 sizeof(struct host_status_block) +
6212 sizeof(struct eth_tx_db_data));
555f6c78
EG
6213 }
6214 /* Rx */
6215 for_each_rx_queue(bp, i) {
a2fbb9ea 6216
555f6c78 6217 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
6218 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6219 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6220 bnx2x_fp(bp, i, rx_desc_mapping),
6221 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6222
6223 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6224 bnx2x_fp(bp, i, rx_comp_mapping),
6225 sizeof(struct eth_fast_path_rx_cqe) *
6226 NUM_RCQ_BD);
a2fbb9ea 6227
7a9b2557 6228 /* SGE ring */
32626230 6229 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7a9b2557
VZ
6230 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6231 bnx2x_fp(bp, i, rx_sge_mapping),
6232 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6233 }
555f6c78
EG
6234 /* Tx */
6235 for_each_tx_queue(bp, i) {
6236
6237 /* fastpath tx rings: tx_buf tx_desc */
6238 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6239 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6240 bnx2x_fp(bp, i, tx_desc_mapping),
6241 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6242 }
a2fbb9ea
ET
6243 /* end of fastpath */
6244
6245 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 6246 sizeof(struct host_def_status_block));
a2fbb9ea
ET
6247
6248 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 6249 sizeof(struct bnx2x_slowpath));
a2fbb9ea
ET
6250
6251#ifdef BCM_ISCSI
6252 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6253 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6254 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6255 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6256#endif
7a9b2557 6257 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea
ET
6258
6259#undef BNX2X_PCI_FREE
6260#undef BNX2X_FREE
6261}
6262
6263static int bnx2x_alloc_mem(struct bnx2x *bp)
6264{
6265
6266#define BNX2X_PCI_ALLOC(x, y, size) \
6267 do { \
6268 x = pci_alloc_consistent(bp->pdev, size, y); \
6269 if (x == NULL) \
6270 goto alloc_mem_err; \
6271 memset(x, 0, size); \
6272 } while (0)
6273
6274#define BNX2X_ALLOC(x, size) \
6275 do { \
6276 x = vmalloc(size); \
6277 if (x == NULL) \
6278 goto alloc_mem_err; \
6279 memset(x, 0, size); \
6280 } while (0)
6281
6282 int i;
6283
6284 /* fastpath */
555f6c78 6285 /* Common */
a2fbb9ea
ET
6286 for_each_queue(bp, i) {
6287 bnx2x_fp(bp, i, bp) = bp;
6288
555f6c78 6289 /* status blocks */
a2fbb9ea
ET
6290 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6291 &bnx2x_fp(bp, i, status_blk_mapping),
6292 sizeof(struct host_status_block) +
6293 sizeof(struct eth_tx_db_data));
555f6c78
EG
6294 }
6295 /* Rx */
6296 for_each_rx_queue(bp, i) {
a2fbb9ea 6297
555f6c78 6298 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
6299 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6300 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6301 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6302 &bnx2x_fp(bp, i, rx_desc_mapping),
6303 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6304
6305 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6306 &bnx2x_fp(bp, i, rx_comp_mapping),
6307 sizeof(struct eth_fast_path_rx_cqe) *
6308 NUM_RCQ_BD);
6309
7a9b2557
VZ
6310 /* SGE ring */
6311 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6312 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6313 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6314 &bnx2x_fp(bp, i, rx_sge_mapping),
6315 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea 6316 }
555f6c78
EG
6317 /* Tx */
6318 for_each_tx_queue(bp, i) {
6319
6320 bnx2x_fp(bp, i, hw_tx_prods) =
6321 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6322
6323 bnx2x_fp(bp, i, tx_prods_mapping) =
6324 bnx2x_fp(bp, i, status_blk_mapping) +
6325 sizeof(struct host_status_block);
6326
6327 /* fastpath tx rings: tx_buf tx_desc */
6328 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6329 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6330 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6331 &bnx2x_fp(bp, i, tx_desc_mapping),
6332 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6333 }
a2fbb9ea
ET
6334 /* end of fastpath */
6335
6336 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6337 sizeof(struct host_def_status_block));
6338
6339 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6340 sizeof(struct bnx2x_slowpath));
6341
6342#ifdef BCM_ISCSI
6343 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6344
6345 /* Initialize T1 */
6346 for (i = 0; i < 64*1024; i += 64) {
6347 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6348 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6349 }
6350
6351 /* allocate searcher T2 table
6352 we allocate 1/4 of alloc num for T2
6353 (which is not entered into the ILT) */
6354 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6355
6356 /* Initialize T2 */
6357 for (i = 0; i < 16*1024; i += 64)
6358 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6359
c14423fe 6360 /* now fixup the last line in the block to point to the next block */
a2fbb9ea
ET
6361 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6362
6363 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6364 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6365
6366 /* QM queues (128*MAX_CONN) */
6367 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6368#endif
6369
6370 /* Slow path ring */
6371 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6372
6373 return 0;
6374
6375alloc_mem_err:
6376 bnx2x_free_mem(bp);
6377 return -ENOMEM;
6378
6379#undef BNX2X_PCI_ALLOC
6380#undef BNX2X_ALLOC
6381}
6382
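/* Design note (editorial): every allocation failure above funnels to
 * alloc_mem_err, which simply calls bnx2x_free_mem() -- the NULL checks
 * inside the BNX2X_PCI_FREE/BNX2X_FREE macros make freeing a partially
 * built set safe, so no per-allocation unwind ladder is needed.
 */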
6383static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6384{
6385 int i;
6386
555f6c78 6387 for_each_tx_queue(bp, i) {
a2fbb9ea
ET
6388 struct bnx2x_fastpath *fp = &bp->fp[i];
6389
6390 u16 bd_cons = fp->tx_bd_cons;
6391 u16 sw_prod = fp->tx_pkt_prod;
6392 u16 sw_cons = fp->tx_pkt_cons;
6393
a2fbb9ea
ET
6394 while (sw_cons != sw_prod) {
6395 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6396 sw_cons++;
6397 }
6398 }
6399}
6400
6401static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6402{
6403 int i, j;
6404
555f6c78 6405 for_each_rx_queue(bp, j) {
a2fbb9ea
ET
6406 struct bnx2x_fastpath *fp = &bp->fp[j];
6407
a2fbb9ea
ET
6408 for (i = 0; i < NUM_RX_BD; i++) {
6409 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6410 struct sk_buff *skb = rx_buf->skb;
6411
6412 if (skb == NULL)
6413 continue;
6414
6415 pci_unmap_single(bp->pdev,
6416 pci_unmap_addr(rx_buf, mapping),
356e2385 6417 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
a2fbb9ea
ET
6418
6419 rx_buf->skb = NULL;
6420 dev_kfree_skb(skb);
6421 }
7a9b2557 6422 if (!fp->disable_tpa)
32626230
EG
6423 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6424 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 6425 ETH_MAX_AGGREGATION_QUEUES_E1H);
a2fbb9ea
ET
6426 }
6427}
6428
6429static void bnx2x_free_skbs(struct bnx2x *bp)
6430{
6431 bnx2x_free_tx_skbs(bp);
6432 bnx2x_free_rx_skbs(bp);
6433}
6434
6435static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6436{
34f80b04 6437 int i, offset = 1;
a2fbb9ea
ET
6438
6439 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 6440 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
a2fbb9ea
ET
6441 bp->msix_table[0].vector);
6442
6443 for_each_queue(bp, i) {
c14423fe 6444 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 6445 "state %x\n", i, bp->msix_table[i + offset].vector,
a2fbb9ea
ET
6446 bnx2x_fp(bp, i, state));
6447
34f80b04 6448 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 6449 }
a2fbb9ea
ET
6450}
6451
6452static void bnx2x_free_irq(struct bnx2x *bp)
6453{
a2fbb9ea 6454 if (bp->flags & USING_MSIX_FLAG) {
a2fbb9ea
ET
6455 bnx2x_free_msix_irqs(bp);
6456 pci_disable_msix(bp->pdev);
a2fbb9ea
ET
6457 bp->flags &= ~USING_MSIX_FLAG;
6458
8badd27a
EG
6459 } else if (bp->flags & USING_MSI_FLAG) {
6460 free_irq(bp->pdev->irq, bp->dev);
6461 pci_disable_msi(bp->pdev);
6462 bp->flags &= ~USING_MSI_FLAG;
6463
a2fbb9ea
ET
6464 } else
6465 free_irq(bp->pdev->irq, bp->dev);
6466}
6467
6468static int bnx2x_enable_msix(struct bnx2x *bp)
6469{
8badd27a
EG
6470 int i, rc, offset = 1;
6471 int igu_vec = 0;
a2fbb9ea 6472
8badd27a
EG
6473 bp->msix_table[0].entry = igu_vec;
6474 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
a2fbb9ea 6475
34f80b04 6476 for_each_queue(bp, i) {
8badd27a 6477 igu_vec = BP_L_ID(bp) + offset + i;
34f80b04
EG
6478 bp->msix_table[i + offset].entry = igu_vec;
6479 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6480 "(fastpath #%u)\n", i + offset, igu_vec, i);
a2fbb9ea
ET
6481 }
6482
34f80b04 6483 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
555f6c78 6484 BNX2X_NUM_QUEUES(bp) + offset);
34f80b04 6485 if (rc) {
8badd27a
EG
6486 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6487 return rc;
34f80b04 6488 }
8badd27a 6489
a2fbb9ea
ET
6490 bp->flags |= USING_MSIX_FLAG;
6491
6492 return 0;
a2fbb9ea
ET
6493}
6494
a2fbb9ea
ET
6495static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6496{
34f80b04 6497 int i, rc, offset = 1;
a2fbb9ea 6498
a2fbb9ea
ET
6499 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6500 bp->dev->name, bp->dev);
a2fbb9ea
ET
6501 if (rc) {
6502 BNX2X_ERR("request sp irq failed\n");
6503 return -EBUSY;
6504 }
6505
6506 for_each_queue(bp, i) {
555f6c78
EG
6507 struct bnx2x_fastpath *fp = &bp->fp[i];
6508
6509 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
34f80b04 6510 rc = request_irq(bp->msix_table[i + offset].vector,
555f6c78 6511 bnx2x_msix_fp_int, 0, fp->name, fp);
a2fbb9ea 6512 if (rc) {
555f6c78 6513 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
a2fbb9ea
ET
6514 bnx2x_free_msix_irqs(bp);
6515 return -EBUSY;
6516 }
6517
555f6c78 6518 fp->state = BNX2X_FP_STATE_IRQ;
a2fbb9ea
ET
6519 }
6520
555f6c78
EG
6521 i = BNX2X_NUM_QUEUES(bp);
6522 if (is_multi(bp))
6523 printk(KERN_INFO PFX
6524 "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
6525 bp->dev->name, bp->msix_table[0].vector,
6526 bp->msix_table[offset].vector,
6527 bp->msix_table[offset + i - 1].vector);
6528 else
6529 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
6530 bp->dev->name, bp->msix_table[0].vector,
6531 bp->msix_table[offset + i - 1].vector);
6532
a2fbb9ea 6533 return 0;
a2fbb9ea
ET
6534}
6535
8badd27a
EG
6536static int bnx2x_enable_msi(struct bnx2x *bp)
6537{
6538 int rc;
6539
6540 rc = pci_enable_msi(bp->pdev);
6541 if (rc) {
6542 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6543 return -1;
6544 }
6545 bp->flags |= USING_MSI_FLAG;
6546
6547 return 0;
6548}
6549
a2fbb9ea
ET
6550static int bnx2x_req_irq(struct bnx2x *bp)
6551{
8badd27a 6552 unsigned long flags;
34f80b04 6553 int rc;
a2fbb9ea 6554
8badd27a
EG
6555 if (bp->flags & USING_MSI_FLAG)
6556 flags = 0;
6557 else
6558 flags = IRQF_SHARED;
6559
6560 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
34f80b04 6561 bp->dev->name, bp->dev);
a2fbb9ea
ET
6562 if (!rc)
6563 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6564
6565 return rc;
a2fbb9ea
ET
6566}
6567
65abd74d
YG
6568static void bnx2x_napi_enable(struct bnx2x *bp)
6569{
6570 int i;
6571
555f6c78 6572 for_each_rx_queue(bp, i)
65abd74d
YG
6573 napi_enable(&bnx2x_fp(bp, i, napi));
6574}
6575
6576static void bnx2x_napi_disable(struct bnx2x *bp)
6577{
6578 int i;
6579
555f6c78 6580 for_each_rx_queue(bp, i)
65abd74d
YG
6581 napi_disable(&bnx2x_fp(bp, i, napi));
6582}
6583
6584static void bnx2x_netif_start(struct bnx2x *bp)
6585{
6586 if (atomic_dec_and_test(&bp->intr_sem)) {
6587 if (netif_running(bp->dev)) {
65abd74d
YG
6588 bnx2x_napi_enable(bp);
6589 bnx2x_int_enable(bp);
555f6c78
EG
6590 if (bp->state == BNX2X_STATE_OPEN)
6591 netif_tx_wake_all_queues(bp->dev);
65abd74d
YG
6592 }
6593 }
6594}
6595
f8ef6e44 6596static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 6597{
f8ef6e44 6598 bnx2x_int_disable_sync(bp, disable_hw);
e94d8af3 6599 bnx2x_napi_disable(bp);
762d5f6c
EG
6600 netif_tx_disable(bp->dev);
6601 bp->dev->trans_start = jiffies; /* prevent tx timeout */
65abd74d
YG
6602}
6603
a2fbb9ea
ET
6604/*
6605 * Init service functions
6606 */
6607
static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length = 2;
	config->hdr.offset = port ? 32 : 0;
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.client_id = 0;
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[1].target_table_entry.flags =
				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
	else
		CAM_INVALIDATE(config->config_table[1]);
	config->config_table[1].target_table_entry.client_id = 0;
	config->config_table[1].target_table_entry.vlan_id = 0;

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

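/* E1H variant: a single unicast CAM entry per function, tagged with
 * the outer VLAN (e1hov) in multi-function mode.
 */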
static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	if (set && (bp->state != BNX2X_STATE_OPEN)) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	/* CAM allocation for E1H
	 * unicasts: by func number
	 * multicast: 20+FUNC*20, 20 each
	 */
	config->hdr.length = 1;
	config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].client_id = BP_L_ID(bp);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

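/* Poll (with 1 ms sleeps) for a slowpath ramrod completion to flip
 * *state_p to the expected state; in polling mode the rx rings are
 * serviced manually since interrupts may not be running yet.
 */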
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

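/* The leading (index 0) queue is brought up with a PORT_SETUP ramrod
 * whose completion is reflected in bp->state; the additional RSS
 * queues use CLIENT_SETUP and track their own fastpath state below.
 */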
static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}

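/* Pick the interrupt mode and queue count: the int_mode module
 * parameter can force INTx or MSI (single queue); otherwise MSI-X is
 * attempted with one queue pair per online CPU, falling back to a
 * single queue on failure.
 */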
static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_set_int_mode(struct bnx2x *bp)
{
	int num_queues;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP,
		   "set number of queues to %d\n", num_queues);
		break;

	case INT_MODE_MSIX:
	default:
		if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
			num_queues = min_t(u32, num_online_cpus(),
					   BNX2X_MAX_QUEUES(bp));
		else
			num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
		   " number of tx queues to %d\n",
		   bp->num_rx_queues, bp->num_tx_queues);
		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		if (bnx2x_enable_msix(bp)) {
			/* failed to enable MSI-X */
			num_queues = 1;
			bp->num_rx_queues = num_queues;
			bp->num_tx_queues = num_queues;
			if (bp->multi_mode)
				BNX2X_ERR("Multi requested but failed to "
					  "enable MSI-X set number of "
					  "queues to %d\n", num_queues);
		}
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_tx_queues;
}

static void bnx2x_set_rx_mode(struct net_device *dev);

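/* Bring the NIC up end-to-end: negotiate the load type with the MCP
 * (or emulate it via load_count[] when no MCP is present), init the
 * HW, send the SETUP ramrods, program the MAC and open the fast path.
 */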
/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc = 0;
#ifdef BNX2X_STOP_ON_ERROR
	DP(NETIF_MSG_IFUP, "enter load_mode %d\n", load_mode);
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	bnx2x_set_int_mode(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

#ifdef BNX2X_STOP_ON_ERROR
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->poll_no_work = 0;
		fp->poll_calls = 0;
		fp->poll_max_calls = 0;
		fp->poll_complete = 0;
		fp->poll_exit = 0;
	}
#endif
	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error1;
		}
	} else {
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			if (bp->flags & USING_MSI_FLAG)
				pci_disable_msi(bp->pdev);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
			       bp->dev->name, bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		goto load_error3;
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;
		}

	if (bp->state == BNX2X_STATE_OPEN)
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
				goto load_error3;
		}

	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1(bp, 1);
	else
		bnx2x_set_mac_addr_e1h(bp, 1);

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should be only reenabled */
		netif_tx_wake_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

	return 0;

load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}

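/* Tear a non-default queue down in two ramrod steps: HALT the client,
 * then delete its CFC entry, polling for each completion.
 */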
static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}

static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}

static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

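/* The scope of the chip reset is dictated by the unload response from
 * the MCP: common (whole chip), port, or just this function.
 */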
static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}

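/* Mirror image of bnx2x_nic_load(): quiesce the fast path, clear the
 * CAM, negotiate the unload scope with the MCP (honouring WoL), then
 * reset the chip and free all driver resources.
 */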
/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
	}

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG) {
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
		if (CHIP_IS_E1H(bp))
			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);

	} else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
		  " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/* ethtool_ops */

/*
 * Init service functions
 */

static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
	case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
	case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
	case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
	case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
	case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
	case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
	case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);
		return (u32)(-1);
	}
}

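/* On E1H the PXP "pretend" register is used to masquerade as function
 * 0 so that interrupts left enabled by a pre-boot (UNDI) driver can be
 * disabled through the function-0 view of the HC.
 */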
static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (CHIP_IS_E1H(bp))
		bnx2x_undi_int_disable_e1h(bp, func);
	else
		bnx2x_int_disable(bp);
}

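/* If a pre-boot UNDI driver left the device initialized, unload it
 * cleanly through the MCP on both ports, silence its interrupts, and
 * hard-reset everything except the NIG, whose port-swap strapping is
 * saved and restored across the reset.
 */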
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					      NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			       (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					      MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}

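/* Read chip ID, flash size, shmem location, bootcode version and WoL
 * capability; these are common to both ports and all functions.
 */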
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}

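/* Build the ethtool "supported" mask for the port from the switch
 * configuration (1G SerDes vs 10G XGXS), the external PHY type, and
 * the NVRAM speed capability mask.
 */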
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}

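/* Translate the NVRAM link_config for this port into requested speed,
 * duplex, flow control and advertised modes, validating each forced
 * speed against the supported mask built above.
 */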
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
		       " advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}

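/* Pull the per-port link parameters (lane config, external PHY, speed
 * capabilities, XGXS equalization, WoL default and MAC address) out of
 * the shared memory written by the bootcode.
 */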
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	if (config & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
		       " speed_cap_mask 0x%08x link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg = (bp->port.link_config &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
	bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
	bp->dev->dev_addr[5] = (u8)(val & 0xff);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}

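/* Gather all HW info for this function: common chip data, E1H
 * multi-function (outer-VLAN) configuration, port parameters and the
 * per-function MAC address override in MF mode.
 */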
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {

			bp->e1hov = val;
			bp->e1hmf = 1;
			BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
				       "(0x%04x)\n",
				       func, bp->e1hov, bp->e1hov);
		} else {
			BNX2X_DEV_INFO("single function mode\n");
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!! No valid E1HOV for func %d,"
					  " aborting\n", func);
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}

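/* One-time per-device software init: work items, HW info discovery,
 * UNDI cleanup, and defaults for queueing, TPA, ring sizes, interrupt
 * coalescing and the periodic timer.
 */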
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);

	mutex_init(&bp->port.phy_mutex);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		printk(KERN_ERR PFX
		      "Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	bp->tx_ticks = 50;
	bp->rx_ticks = 25;

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}

/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
	if (IS_E1HMF(bp)) {
		u16 vn_max_rate;

		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
			       FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->port.phy_addr;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}

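/* ethtool set_settings: validate the requested autoneg/speed/duplex
 * against the port's supported mask before updating link_params and
 * (if the interface is up) renegotiating the link.
 */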
8315static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8316{
8317 struct bnx2x *bp = netdev_priv(dev);
8318 u32 advertising;
8319
34f80b04
EG
8320 if (IS_E1HMF(bp))
8321 return 0;
8322
a2fbb9ea
ET
8323 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8324 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8325 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8326 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8327 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8328 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8329 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8330
a2fbb9ea 8331 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
8332 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8333 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 8334 return -EINVAL;
f1410647 8335 }
a2fbb9ea
ET
8336
8337 /* advertise the requested speed and duplex if supported */
34f80b04 8338 cmd->advertising &= bp->port.supported;
a2fbb9ea 8339
c18487ee
YR
8340 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8341 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
8342 bp->port.advertising |= (ADVERTISED_Autoneg |
8343 cmd->advertising);
a2fbb9ea
ET
8344
8345 } else { /* forced speed */
8346 /* advertise the requested speed and duplex if supported */
8347 switch (cmd->speed) {
8348 case SPEED_10:
8349 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8350 if (!(bp->port.supported &
8351 SUPPORTED_10baseT_Full)) {
8352 DP(NETIF_MSG_LINK,
8353 "10M full not supported\n");
a2fbb9ea 8354 return -EINVAL;
f1410647 8355 }
8356
8357 advertising = (ADVERTISED_10baseT_Full |
8358 ADVERTISED_TP);
8359 } else {
34f80b04 8360 if (!(bp->port.supported &
8361 SUPPORTED_10baseT_Half)) {
8362 DP(NETIF_MSG_LINK,
8363 "10M half not supported\n");
a2fbb9ea 8364 return -EINVAL;
f1410647 8365 }
8366
8367 advertising = (ADVERTISED_10baseT_Half |
8368 ADVERTISED_TP);
8369 }
8370 break;
8371
8372 case SPEED_100:
8373 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8374 if (!(bp->port.supported &
8375 SUPPORTED_100baseT_Full)) {
8376 DP(NETIF_MSG_LINK,
8377 "100M full not supported\n");
a2fbb9ea 8378 return -EINVAL;
f1410647 8379 }
8380
8381 advertising = (ADVERTISED_100baseT_Full |
8382 ADVERTISED_TP);
8383 } else {
34f80b04 8384 if (!(bp->port.supported &
8385 SUPPORTED_100baseT_Half)) {
8386 DP(NETIF_MSG_LINK,
8387 "100M half not supported\n");
a2fbb9ea 8388 return -EINVAL;
f1410647 8389 }
8390
8391 advertising = (ADVERTISED_100baseT_Half |
8392 ADVERTISED_TP);
8393 }
8394 break;
8395
8396 case SPEED_1000:
8397 if (cmd->duplex != DUPLEX_FULL) {
8398 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 8399 return -EINVAL;
f1410647 8400 }
a2fbb9ea 8401
34f80b04 8402 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 8403 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 8404 return -EINVAL;
f1410647 8405 }
8406
8407 advertising = (ADVERTISED_1000baseT_Full |
8408 ADVERTISED_TP);
8409 break;
8410
8411 case SPEED_2500:
8412 if (cmd->duplex != DUPLEX_FULL) {
8413 DP(NETIF_MSG_LINK,
8414 "2.5G half not supported\n");
a2fbb9ea 8415 return -EINVAL;
f1410647 8416 }
a2fbb9ea 8417
34f80b04 8418 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
8419 DP(NETIF_MSG_LINK,
8420 "2.5G full not supported\n");
a2fbb9ea 8421 return -EINVAL;
f1410647 8422 }
a2fbb9ea 8423
f1410647 8424 advertising = (ADVERTISED_2500baseX_Full |
8425 ADVERTISED_TP);
8426 break;
8427
8428 case SPEED_10000:
8429 if (cmd->duplex != DUPLEX_FULL) {
8430 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 8431 return -EINVAL;
f1410647 8432 }
a2fbb9ea 8433
34f80b04 8434 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 8435 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 8436 return -EINVAL;
f1410647 8437 }
8438
8439 advertising = (ADVERTISED_10000baseT_Full |
8440 ADVERTISED_FIBRE);
8441 break;
8442
8443 default:
f1410647 8444 DP(NETIF_MSG_LINK, "Unsupported speed\n");
8445 return -EINVAL;
8446 }
8447
8448 bp->link_params.req_line_speed = cmd->speed;
8449 bp->link_params.req_duplex = cmd->duplex;
34f80b04 8450 bp->port.advertising = advertising;
8451 }
8452
c18487ee 8453 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 8454 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 8455 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 8456 bp->port.advertising);
a2fbb9ea 8457
34f80b04 8458 if (netif_running(dev)) {
bb2a0f7a 8459 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8460 bnx2x_link_set(bp);
8461 }
8462
8463 return 0;
8464}
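/* Illustrative userspace usage (interface name assumed):
 *
 *   ethtool -s eth0 autoneg on                          - autoneg path
 *   ethtool -s eth0 speed 10000 duplex full autoneg off - forced path
 *
 * A forced speed/duplex pair that is not present in bp->port.supported
 * is rejected with -EINVAL by the switch statement above.
 */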
8465
8466#define PHY_FW_VER_LEN 10
8467
8468static void bnx2x_get_drvinfo(struct net_device *dev,
8469 struct ethtool_drvinfo *info)
8470{
8471 struct bnx2x *bp = netdev_priv(dev);
f0e53a84 8472 u8 phy_fw_ver[PHY_FW_VER_LEN];
8473
8474 strcpy(info->driver, DRV_MODULE_NAME);
8475 strcpy(info->version, DRV_MODULE_VERSION);
8476
8477 phy_fw_ver[0] = '\0';
34f80b04 8478 if (bp->port.pmf) {
4a37fb66 8479 bnx2x_acquire_phy_lock(bp);
8480 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8481 (bp->state != BNX2X_STATE_CLOSED),
8482 phy_fw_ver, PHY_FW_VER_LEN);
4a37fb66 8483 bnx2x_release_phy_lock(bp);
34f80b04 8484 }
c18487ee 8485
8486 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8487 (bp->common.bc_ver & 0xff0000) >> 16,
8488 (bp->common.bc_ver & 0xff00) >> 8,
8489 (bp->common.bc_ver & 0xff),
8490 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
8491 strcpy(info->bus_info, pci_name(bp->pdev));
8492 info->n_stats = BNX2X_NUM_STATS;
8493 info->testinfo_len = BNX2X_NUM_TESTS;
34f80b04 8494 info->eedump_len = bp->common.flash_size;
8495 info->regdump_len = 0;
8496}
8497
8498#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
8499#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
8500
8501static int bnx2x_get_regs_len(struct net_device *dev)
8502{
8503 static u32 regdump_len;
8504 struct bnx2x *bp = netdev_priv(dev);
8505 int i;
8506
8507 if (regdump_len)
8508 return regdump_len;
8509
8510 if (CHIP_IS_E1(bp)) {
8511 for (i = 0; i < REGS_COUNT; i++)
8512 if (IS_E1_ONLINE(reg_addrs[i].info))
8513 regdump_len += reg_addrs[i].size;
8514
8515 for (i = 0; i < WREGS_COUNT_E1; i++)
8516 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
8517 regdump_len += wreg_addrs_e1[i].size *
8518 (1 + wreg_addrs_e1[i].read_regs_count);
8519
8520 } else { /* E1H */
8521 for (i = 0; i < REGS_COUNT; i++)
8522 if (IS_E1H_ONLINE(reg_addrs[i].info))
8523 regdump_len += reg_addrs[i].size;
8524
8525 for (i = 0; i < WREGS_COUNT_E1H; i++)
8526 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
8527 regdump_len += wreg_addrs_e1h[i].size *
8528 (1 + wreg_addrs_e1h[i].read_regs_count);
8529 }
8530 regdump_len *= 4;
8531 regdump_len += sizeof(struct dump_hdr);
8532
8533 return regdump_len;
8534}
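/* The dump length is first accumulated in 32-bit words: each online
 * register block contributes its size, and each wide-bus ("wreg")
 * entry contributes size * (1 + read_regs_count) words. The total is
 * then multiplied by 4 to convert words to bytes, plus the dump
 * header that bnx2x_get_regs() below writes in front of the data.
 */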
8535
8536static void bnx2x_get_regs(struct net_device *dev,
8537 struct ethtool_regs *regs, void *_p)
8538{
8539 u32 *p = _p, i, j;
8540 struct bnx2x *bp = netdev_priv(dev);
8541 struct dump_hdr dump_hdr = {0};
8542
8543 regs->version = 0;
8544 memset(p, 0, regs->len);
8545
8546 if (!netif_running(bp->dev))
8547 return;
8548
8549 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
8550 dump_hdr.dump_sign = dump_sign_all;
8551 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
8552 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
8553 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
8554 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
8555 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
8556
8557 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
8558 p += dump_hdr.hdr_size + 1;
8559
8560 if (CHIP_IS_E1(bp)) {
8561 for (i = 0; i < REGS_COUNT; i++)
8562 if (IS_E1_ONLINE(reg_addrs[i].info))
8563 for (j = 0; j < reg_addrs[i].size; j++)
8564 *p++ = REG_RD(bp,
8565 reg_addrs[i].addr + j*4);
8566
8567 } else { /* E1H */
8568 for (i = 0; i < REGS_COUNT; i++)
8569 if (IS_E1H_ONLINE(reg_addrs[i].info))
8570 for (j = 0; j < reg_addrs[i].size; j++)
8571 *p++ = REG_RD(bp,
8572 reg_addrs[i].addr + j*4);
8573 }
8574}
8575
8576static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8577{
8578 struct bnx2x *bp = netdev_priv(dev);
8579
8580 if (bp->flags & NO_WOL_FLAG) {
8581 wol->supported = 0;
8582 wol->wolopts = 0;
8583 } else {
8584 wol->supported = WAKE_MAGIC;
8585 if (bp->wol)
8586 wol->wolopts = WAKE_MAGIC;
8587 else
8588 wol->wolopts = 0;
8589 }
8590 memset(&wol->sopass, 0, sizeof(wol->sopass));
8591}
8592
8593static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8594{
8595 struct bnx2x *bp = netdev_priv(dev);
8596
8597 if (wol->wolopts & ~WAKE_MAGIC)
8598 return -EINVAL;
8599
8600 if (wol->wolopts & WAKE_MAGIC) {
8601 if (bp->flags & NO_WOL_FLAG)
8602 return -EINVAL;
8603
8604 bp->wol = 1;
34f80b04 8605 } else
a2fbb9ea 8606 bp->wol = 0;
34f80b04 8607
8608 return 0;
8609}
8610
8611static u32 bnx2x_get_msglevel(struct net_device *dev)
8612{
8613 struct bnx2x *bp = netdev_priv(dev);
8614
8615 return bp->msglevel;
8616}
8617
8618static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8619{
8620 struct bnx2x *bp = netdev_priv(dev);
8621
8622 if (capable(CAP_NET_ADMIN))
8623 bp->msglevel = level;
8624}
8625
8626static int bnx2x_nway_reset(struct net_device *dev)
8627{
8628 struct bnx2x *bp = netdev_priv(dev);
8629
8630 if (!bp->port.pmf)
8631 return 0;
a2fbb9ea 8632
34f80b04 8633 if (netif_running(dev)) {
bb2a0f7a 8634 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8635 bnx2x_link_set(bp);
8636 }
8637
8638 return 0;
8639}
8640
8641static u32
8642bnx2x_get_link(struct net_device *dev)
8643{
8644 struct bnx2x *bp = netdev_priv(dev);
8645
8646 return bp->link_vars.link_up;
8647}
8648
8649static int bnx2x_get_eeprom_len(struct net_device *dev)
8650{
8651 struct bnx2x *bp = netdev_priv(dev);
8652
34f80b04 8653 return bp->common.flash_size;
8654}
8655
8656static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8657{
34f80b04 8658 int port = BP_PORT(bp);
8659 int count, i;
8660 u32 val = 0;
8661
8662 /* adjust timeout for emulation/FPGA */
8663 count = NVRAM_TIMEOUT_COUNT;
8664 if (CHIP_REV_IS_SLOW(bp))
8665 count *= 100;
8666
8667 /* request access to nvram interface */
8668 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8669 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8670
8671 for (i = 0; i < count*10; i++) {
8672 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8673 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8674 break;
8675
8676 udelay(5);
8677 }
8678
8679 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 8680 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
8681 return -EBUSY;
8682 }
8683
8684 return 0;
8685}
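/* The handshake above polls the SW arbiter up to count*10 times with
 * a 5 us delay between reads, i.e. roughly NVRAM_TIMEOUT_COUNT * 50 us
 * in total (and 100x that on slow emulation/FPGA platforms) before
 * failing with -EBUSY. bnx2x_release_nvram_lock() below is the
 * mirror image, using the ARB_REQ_CLR1 bit.
 */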
8686
8687static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8688{
34f80b04 8689 int port = BP_PORT(bp);
8690 int count, i;
8691 u32 val = 0;
8692
8693 /* adjust timeout for emulation/FPGA */
8694 count = NVRAM_TIMEOUT_COUNT;
8695 if (CHIP_REV_IS_SLOW(bp))
8696 count *= 100;
8697
8698 /* relinquish nvram interface */
8699 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8700 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8701
8702 for (i = 0; i < count*10; i++) {
8703 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8704 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8705 break;
8706
8707 udelay(5);
8708 }
8709
8710 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 8711 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8712 return -EBUSY;
8713 }
8714
8715 return 0;
8716}
8717
8718static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8719{
8720 u32 val;
8721
8722 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8723
8724 /* enable both bits, even on read */
8725 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8726 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8727 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8728}
8729
8730static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8731{
8732 u32 val;
8733
8734 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8735
8736 /* disable both bits, even after read */
8737 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8738 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8739 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8740}
8741
4781bfad 8742static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
8743 u32 cmd_flags)
8744{
f1410647 8745 int count, i, rc;
8746 u32 val;
8747
8748 /* build the command word */
8749 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8750
8751 /* need to clear DONE bit separately */
8752 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8753
8754 /* address of the NVRAM to read from */
8755 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8756 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8757
8758 /* issue a read command */
8759 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8760
8761 /* adjust timeout for emulation/FPGA */
8762 count = NVRAM_TIMEOUT_COUNT;
8763 if (CHIP_REV_IS_SLOW(bp))
8764 count *= 100;
8765
8766 /* wait for completion */
8767 *ret_val = 0;
8768 rc = -EBUSY;
8769 for (i = 0; i < count; i++) {
8770 udelay(5);
8771 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8772
8773 if (val & MCPR_NVM_COMMAND_DONE) {
8774 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
 8775 /* we read nvram data in cpu order
 8776 * but ethtool sees it as an array of bytes;
 8777 * converting to big-endian yields the byte order it expects */
4781bfad 8778 *ret_val = cpu_to_be32(val);
8779 rc = 0;
8780 break;
8781 }
8782 }
8783
8784 return rc;
8785}
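/* A multi-dword NVRAM access is framed by the MCPR_NVM_COMMAND_FIRST
 * and MCPR_NVM_COMMAND_LAST flags, with the DONE bit polled for
 * completion of each dword. Converting the result to big-endian lets
 * callers (and ultimately ethtool) treat NVRAM as a flat byte array
 * regardless of host endianness.
 */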
8786
8787static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8788 int buf_size)
8789{
8790 int rc;
8791 u32 cmd_flags;
4781bfad 8792 __be32 val;
8793
8794 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8795 DP(BNX2X_MSG_NVM,
c14423fe 8796 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8797 offset, buf_size);
8798 return -EINVAL;
8799 }
8800
8801 if (offset + buf_size > bp->common.flash_size) {
8802 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8803 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8804 offset, buf_size, bp->common.flash_size);
8805 return -EINVAL;
8806 }
8807
8808 /* request access to nvram interface */
8809 rc = bnx2x_acquire_nvram_lock(bp);
8810 if (rc)
8811 return rc;
8812
8813 /* enable access to nvram interface */
8814 bnx2x_enable_nvram_access(bp);
8815
8816 /* read the first word(s) */
8817 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8818 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8819 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8820 memcpy(ret_buf, &val, 4);
8821
8822 /* advance to the next dword */
8823 offset += sizeof(u32);
8824 ret_buf += sizeof(u32);
8825 buf_size -= sizeof(u32);
8826 cmd_flags = 0;
8827 }
8828
8829 if (rc == 0) {
8830 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8831 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8832 memcpy(ret_buf, &val, 4);
8833 }
8834
8835 /* disable access to nvram interface */
8836 bnx2x_disable_nvram_access(bp);
8837 bnx2x_release_nvram_lock(bp);
8838
8839 return rc;
8840}
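/* Illustrative caller sketch (not part of the driver): reading the
 * NVRAM magic dword at offset 0, much as bnx2x_test_nvram() further
 * below does. Offset and length must be dword-aligned and non-zero,
 * or the function returns -EINVAL:
 *
 *	__be32 magic_be;
 *	int rc = bnx2x_nvram_read(bp, 0, (u8 *)&magic_be,
 *				  sizeof(magic_be));
 *	if (rc == 0 && be32_to_cpu(magic_be) == 0x669955aa)
 *		-> NVRAM image looks valid
 */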
8841
8842static int bnx2x_get_eeprom(struct net_device *dev,
8843 struct ethtool_eeprom *eeprom, u8 *eebuf)
8844{
8845 struct bnx2x *bp = netdev_priv(dev);
8846 int rc;
8847
8848 if (!netif_running(dev))
8849 return -EAGAIN;
8850
34f80b04 8851 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8852 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8853 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8854 eeprom->len, eeprom->len);
8855
8856 /* parameters already validated in ethtool_get_eeprom */
8857
8858 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8859
8860 return rc;
8861}
8862
8863static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8864 u32 cmd_flags)
8865{
f1410647 8866 int count, i, rc;
8867
8868 /* build the command word */
8869 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8870
8871 /* need to clear DONE bit separately */
8872 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8873
8874 /* write the data */
8875 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8876
8877 /* address of the NVRAM to write to */
8878 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8879 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8880
8881 /* issue the write command */
8882 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8883
8884 /* adjust timeout for emulation/FPGA */
8885 count = NVRAM_TIMEOUT_COUNT;
8886 if (CHIP_REV_IS_SLOW(bp))
8887 count *= 100;
8888
8889 /* wait for completion */
8890 rc = -EBUSY;
8891 for (i = 0; i < count; i++) {
8892 udelay(5);
8893 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8894 if (val & MCPR_NVM_COMMAND_DONE) {
8895 rc = 0;
8896 break;
8897 }
8898 }
8899
8900 return rc;
8901}
8902
f1410647 8903#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
8904
8905static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8906 int buf_size)
8907{
8908 int rc;
8909 u32 cmd_flags;
8910 u32 align_offset;
4781bfad 8911 __be32 val;
a2fbb9ea 8912
8913 if (offset + buf_size > bp->common.flash_size) {
8914 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8915 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8916 offset, buf_size, bp->common.flash_size);
8917 return -EINVAL;
8918 }
8919
8920 /* request access to nvram interface */
8921 rc = bnx2x_acquire_nvram_lock(bp);
8922 if (rc)
8923 return rc;
8924
8925 /* enable access to nvram interface */
8926 bnx2x_enable_nvram_access(bp);
8927
8928 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8929 align_offset = (offset & ~0x03);
8930 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8931
8932 if (rc == 0) {
8933 val &= ~(0xff << BYTE_OFFSET(offset));
8934 val |= (*data_buf << BYTE_OFFSET(offset));
8935
8936 /* nvram data is returned as an array of bytes
8937 * convert it back to cpu order */
8938 val = be32_to_cpu(val);
8939
8940 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8941 cmd_flags);
8942 }
8943
8944 /* disable access to nvram interface */
8945 bnx2x_disable_nvram_access(bp);
8946 bnx2x_release_nvram_lock(bp);
8947
8948 return rc;
8949}
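/* Worked example of the single-byte read-modify-write above: a write
 * to offset 0x105 aligns down to dword 0x104, BYTE_OFFSET(0x105) =
 * 8 * (0x105 & 0x03) = 8, so ~(0xff << 8) clears byte lane 1 of the
 * dword and the new byte is shifted into that same lane before the
 * dword is written back.
 */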
8950
8951static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8952 int buf_size)
8953{
8954 int rc;
8955 u32 cmd_flags;
8956 u32 val;
8957 u32 written_so_far;
8958
34f80b04 8959 if (buf_size == 1) /* ethtool */
a2fbb9ea 8960 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8961
8962 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8963 DP(BNX2X_MSG_NVM,
c14423fe 8964 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8965 offset, buf_size);
8966 return -EINVAL;
8967 }
8968
8969 if (offset + buf_size > bp->common.flash_size) {
8970 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8971 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8972 offset, buf_size, bp->common.flash_size);
8973 return -EINVAL;
8974 }
8975
8976 /* request access to nvram interface */
8977 rc = bnx2x_acquire_nvram_lock(bp);
8978 if (rc)
8979 return rc;
8980
8981 /* enable access to nvram interface */
8982 bnx2x_enable_nvram_access(bp);
8983
8984 written_so_far = 0;
8985 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8986 while ((written_so_far < buf_size) && (rc == 0)) {
8987 if (written_so_far == (buf_size - sizeof(u32)))
8988 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8989 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8990 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8991 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8992 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8993
8994 memcpy(&val, data_buf, 4);
8995
8996 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8997
8998 /* advance to the next dword */
8999 offset += sizeof(u32);
9000 data_buf += sizeof(u32);
9001 written_so_far += sizeof(u32);
9002 cmd_flags = 0;
9003 }
9004
9005 /* disable access to nvram interface */
9006 bnx2x_disable_nvram_access(bp);
9007 bnx2x_release_nvram_lock(bp);
9008
9009 return rc;
9010}
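/* The FIRST/LAST flag juggling in the loop above restarts the command
 * sequence at every NVRAM_PAGE_SIZE boundary: LAST is set on the dword
 * that ends a page (or the buffer), FIRST on the dword that starts a
 * new page, so a large write goes out as a series of per-page bursts.
 */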
9011
9012static int bnx2x_set_eeprom(struct net_device *dev,
9013 struct ethtool_eeprom *eeprom, u8 *eebuf)
9014{
9015 struct bnx2x *bp = netdev_priv(dev);
9016 int rc;
9017
9018 if (!netif_running(dev))
9019 return -EAGAIN;
9020
34f80b04 9021 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9022 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9023 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9024 eeprom->len, eeprom->len);
9025
9026 /* parameters already validated in ethtool_set_eeprom */
9027
c18487ee 9028 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
9029 if (eeprom->magic == 0x00504859)
9030 if (bp->port.pmf) {
9031
4a37fb66 9032 bnx2x_acquire_phy_lock(bp);
9033 rc = bnx2x_flash_download(bp, BP_PORT(bp),
9034 bp->link_params.ext_phy_config,
9035 (bp->state != BNX2X_STATE_CLOSED),
9036 eebuf, eeprom->len);
9037 if ((bp->state == BNX2X_STATE_OPEN) ||
9038 (bp->state == BNX2X_STATE_DISABLED)) {
34f80b04 9039 rc |= bnx2x_link_reset(&bp->link_params,
589abe3a 9040 &bp->link_vars, 1);
9041 rc |= bnx2x_phy_init(&bp->link_params,
9042 &bp->link_vars);
bb2a0f7a 9043 }
4a37fb66 9044 bnx2x_release_phy_lock(bp);
9045
9046 } else /* Only the PMF can access the PHY */
9047 return -EINVAL;
9048 else
c18487ee 9049 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9050
9051 return rc;
9052}
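/* The PHY-upgrade magic 0x00504859 spells "PHY" in ASCII (0x50 'P',
 * 0x48 'H', 0x59 'Y'); only the PMF may reflash the external PHY,
 * and any other magic value falls through to a plain NVRAM write.
 */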
9053
9054static int bnx2x_get_coalesce(struct net_device *dev,
9055 struct ethtool_coalesce *coal)
9056{
9057 struct bnx2x *bp = netdev_priv(dev);
9058
9059 memset(coal, 0, sizeof(struct ethtool_coalesce));
9060
9061 coal->rx_coalesce_usecs = bp->rx_ticks;
9062 coal->tx_coalesce_usecs = bp->tx_ticks;
9063
9064 return 0;
9065}
9066
9067static int bnx2x_set_coalesce(struct net_device *dev,
9068 struct ethtool_coalesce *coal)
9069{
9070 struct bnx2x *bp = netdev_priv(dev);
9071
9072 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9073 if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
9074 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
9075
9076 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9077 if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
9078 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
a2fbb9ea 9079
34f80b04 9080 if (netif_running(dev))
9081 bnx2x_update_coalesce(bp);
9082
9083 return 0;
9084}
9085
9086static void bnx2x_get_ringparam(struct net_device *dev,
9087 struct ethtool_ringparam *ering)
9088{
9089 struct bnx2x *bp = netdev_priv(dev);
9090
9091 ering->rx_max_pending = MAX_RX_AVAIL;
9092 ering->rx_mini_max_pending = 0;
9093 ering->rx_jumbo_max_pending = 0;
9094
9095 ering->rx_pending = bp->rx_ring_size;
9096 ering->rx_mini_pending = 0;
9097 ering->rx_jumbo_pending = 0;
9098
9099 ering->tx_max_pending = MAX_TX_AVAIL;
9100 ering->tx_pending = bp->tx_ring_size;
9101}
9102
9103static int bnx2x_set_ringparam(struct net_device *dev,
9104 struct ethtool_ringparam *ering)
9105{
9106 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9107 int rc = 0;
9108
9109 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9110 (ering->tx_pending > MAX_TX_AVAIL) ||
9111 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9112 return -EINVAL;
9113
9114 bp->rx_ring_size = ering->rx_pending;
9115 bp->tx_ring_size = ering->tx_pending;
9116
9117 if (netif_running(dev)) {
9118 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9119 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9120 }
9121
34f80b04 9122 return rc;
9123}
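/* The lower bound on tx_pending above (> MAX_SKB_FRAGS + 4) keeps
 * room for a maximally fragmented skb plus the few extra BDs a single
 * packet may consume; a smaller ring could never accept such a
 * packet. Changing either ring size reloads the NIC when it is up.
 */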
9124
9125static void bnx2x_get_pauseparam(struct net_device *dev,
9126 struct ethtool_pauseparam *epause)
9127{
9128 struct bnx2x *bp = netdev_priv(dev);
9129
9130 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9131 BNX2X_FLOW_CTRL_AUTO) &&
9132 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9133
9134 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9135 BNX2X_FLOW_CTRL_RX);
9136 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9137 BNX2X_FLOW_CTRL_TX);
9138
9139 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9140 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9141 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9142}
9143
9144static int bnx2x_set_pauseparam(struct net_device *dev,
9145 struct ethtool_pauseparam *epause)
9146{
9147 struct bnx2x *bp = netdev_priv(dev);
9148
9149 if (IS_E1HMF(bp))
9150 return 0;
9151
9152 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9153 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9154 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9155
c0700f90 9156 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
a2fbb9ea 9157
f1410647 9158 if (epause->rx_pause)
c0700f90 9159 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
c18487ee 9160
f1410647 9161 if (epause->tx_pause)
c0700f90 9162 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
c18487ee 9163
9164 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9165 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 9166
c18487ee 9167 if (epause->autoneg) {
34f80b04 9168 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
3196a88a 9169 DP(NETIF_MSG_LINK, "autoneg not supported\n");
9170 return -EINVAL;
9171 }
a2fbb9ea 9172
c18487ee 9173 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
c0700f90 9174 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
c18487ee 9175 }
a2fbb9ea 9176
9177 DP(NETIF_MSG_LINK,
9178 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9179
9180 if (netif_running(dev)) {
bb2a0f7a 9181 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9182 bnx2x_link_set(bp);
9183 }
9184
9185 return 0;
9186}
9187
9188static int bnx2x_set_flags(struct net_device *dev, u32 data)
9189{
9190 struct bnx2x *bp = netdev_priv(dev);
9191 int changed = 0;
9192 int rc = 0;
9193
9194 /* TPA requires Rx CSUM offloading */
9195 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9196 if (!(dev->features & NETIF_F_LRO)) {
9197 dev->features |= NETIF_F_LRO;
9198 bp->flags |= TPA_ENABLE_FLAG;
9199 changed = 1;
9200 }
9201
9202 } else if (dev->features & NETIF_F_LRO) {
9203 dev->features &= ~NETIF_F_LRO;
9204 bp->flags &= ~TPA_ENABLE_FLAG;
9205 changed = 1;
9206 }
9207
9208 if (changed && netif_running(dev)) {
9209 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9210 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9211 }
9212
9213 return rc;
9214}
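/* TPA (LRO) and Rx checksum offload are deliberately coupled:
 * bnx2x_set_rx_csum() below calls back into this function to clear
 * ETH_FLAG_LRO whenever Rx CSUM is turned off, since aggregated
 * packets would otherwise be dropped for carrying a checksum the
 * stack cannot verify.
 */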
9215
9216static u32 bnx2x_get_rx_csum(struct net_device *dev)
9217{
9218 struct bnx2x *bp = netdev_priv(dev);
9219
9220 return bp->rx_csum;
9221}
9222
9223static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9224{
9225 struct bnx2x *bp = netdev_priv(dev);
df0f2343 9226 int rc = 0;
9227
9228 bp->rx_csum = data;
9229
 9230 /* Disable TPA when Rx CSUM is disabled; otherwise all
 9231 TPA'ed packets will be discarded due to a wrong TCP CSUM */
9232 if (!data) {
9233 u32 flags = ethtool_op_get_flags(dev);
9234
9235 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9236 }
9237
9238 return rc;
9239}
9240
9241static int bnx2x_set_tso(struct net_device *dev, u32 data)
9242{
755735eb 9243 if (data) {
a2fbb9ea 9244 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9245 dev->features |= NETIF_F_TSO6;
9246 } else {
a2fbb9ea 9247 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9248 dev->features &= ~NETIF_F_TSO6;
9249 }
9250
9251 return 0;
9252}
9253
f3c87cdd 9254static const struct {
9255 char string[ETH_GSTRING_LEN];
9256} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9257 { "register_test (offline)" },
9258 { "memory_test (offline)" },
9259 { "loopback_test (offline)" },
9260 { "nvram_test (online)" },
9261 { "interrupt_test (online)" },
9262 { "link_test (online)" },
d3d4f495 9263 { "idle check (online)" }
9264};
9265
9266static int bnx2x_self_test_count(struct net_device *dev)
9267{
9268 return BNX2X_NUM_TESTS;
9269}
9270
9271static int bnx2x_test_registers(struct bnx2x *bp)
9272{
9273 int idx, i, rc = -ENODEV;
9274 u32 wr_val = 0;
9dabc424 9275 int port = BP_PORT(bp);
9276 static const struct {
9277 u32 offset0;
9278 u32 offset1;
9279 u32 mask;
9280 } reg_tbl[] = {
9281/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
9282 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
9283 { HC_REG_AGG_INT_0, 4, 0x000003ff },
9284 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
9285 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
9286 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
9287 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
9288 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9289 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
9290 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9291/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
9292 { QM_REG_CONNNUM_0, 4, 0x000fffff },
9293 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
9294 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
9295 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
9296 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9297 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
9298 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
9299 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
9300 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
9301/* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
9302 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
9303 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
9304 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
9305 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
9306 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
9307 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
9308 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
9309 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
9310 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
9311/* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
9312 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
9313 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
9314 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
9315 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9316 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
9317 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9318 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
9319
9320 { 0xffffffff, 0, 0x00000000 }
9321 };
9322
9323 if (!netif_running(bp->dev))
9324 return rc;
9325
9326 /* Repeat the test twice:
9327 First by writing 0x00000000, second by writing 0xffffffff */
9328 for (idx = 0; idx < 2; idx++) {
9329
9330 switch (idx) {
9331 case 0:
9332 wr_val = 0;
9333 break;
9334 case 1:
9335 wr_val = 0xffffffff;
9336 break;
9337 }
9338
9339 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9340 u32 offset, mask, save_val, val;
9341
9342 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9343 mask = reg_tbl[i].mask;
9344
9345 save_val = REG_RD(bp, offset);
9346
9347 REG_WR(bp, offset, wr_val);
9348 val = REG_RD(bp, offset);
9349
9350 /* Restore the original register's value */
9351 REG_WR(bp, offset, save_val);
9352
 9353 /* verify that the value is as expected */
9354 if ((val & mask) != (wr_val & mask))
9355 goto test_reg_exit;
9356 }
9357 }
9358
9359 rc = 0;
9360
9361test_reg_exit:
9362 return rc;
9363}
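/* The register test walks reg_tbl twice, once with an all-zeros and
 * once with an all-ones pattern. offset0 + port * offset1 selects the
 * per-port copy of each register (e.g. with offset1 == 4 the port 1
 * register sits one dword above its port 0 twin), and only the bits
 * in the mask are compared so read-only fields do not cause false
 * failures.
 */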
9364
9365static int bnx2x_test_memory(struct bnx2x *bp)
9366{
9367 int i, j, rc = -ENODEV;
9368 u32 val;
9369 static const struct {
9370 u32 offset;
9371 int size;
9372 } mem_tbl[] = {
9373 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
9374 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9375 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
9376 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
9377 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
9378 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
9379 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
9380
9381 { 0xffffffff, 0 }
9382 };
9383 static const struct {
9384 char *name;
9385 u32 offset;
9386 u32 e1_mask;
9387 u32 e1h_mask;
f3c87cdd 9388 } prty_tbl[] = {
9389 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
9390 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
9391 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
9392 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
9393 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
9394 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
9395
9396 { NULL, 0xffffffff, 0, 0 }
9397 };
9398
9399 if (!netif_running(bp->dev))
9400 return rc;
9401
9402 /* Go through all the memories */
9403 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9404 for (j = 0; j < mem_tbl[i].size; j++)
9405 REG_RD(bp, mem_tbl[i].offset + j*4);
9406
9407 /* Check the parity status */
9408 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9409 val = REG_RD(bp, prty_tbl[i].offset);
9410 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9411 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9412 DP(NETIF_MSG_HW,
9413 "%s is 0x%x\n", prty_tbl[i].name, val);
9414 goto test_mem_exit;
9415 }
9416 }
9417
9418 rc = 0;
9419
9420test_mem_exit:
9421 return rc;
9422}
9423
9424static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9425{
9426 int cnt = 1000;
9427
9428 if (link_up)
9429 while (bnx2x_link_test(bp) && cnt--)
9430 msleep(10);
9431}
9432
9433static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9434{
9435 unsigned int pkt_size, num_pkts, i;
9436 struct sk_buff *skb;
9437 unsigned char *packet;
9438 struct bnx2x_fastpath *fp = &bp->fp[0];
9439 u16 tx_start_idx, tx_idx;
9440 u16 rx_start_idx, rx_idx;
9441 u16 pkt_prod;
9442 struct sw_tx_bd *tx_buf;
9443 struct eth_tx_bd *tx_bd;
9444 dma_addr_t mapping;
9445 union eth_rx_cqe *cqe;
9446 u8 cqe_fp_flags;
9447 struct sw_rx_bd *rx_buf;
9448 u16 len;
9449 int rc = -ENODEV;
9450
9451 /* check the loopback mode */
9452 switch (loopback_mode) {
9453 case BNX2X_PHY_LOOPBACK:
9454 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
9455 return -EINVAL;
9456 break;
9457 case BNX2X_MAC_LOOPBACK:
f3c87cdd 9458 bp->link_params.loopback_mode = LOOPBACK_BMAC;
f3c87cdd 9459 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9460 break;
9461 default:
f3c87cdd 9462 return -EINVAL;
b5bf9068 9463 }
f3c87cdd 9464
9465 /* prepare the loopback packet */
9466 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9467 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
9468 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9469 if (!skb) {
9470 rc = -ENOMEM;
9471 goto test_loopback_exit;
9472 }
9473 packet = skb_put(skb, pkt_size);
9474 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9475 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
9476 for (i = ETH_HLEN; i < pkt_size; i++)
9477 packet[i] = (unsigned char) (i & 0xff);
9478
b5bf9068 9479 /* send the loopback packet */
9480 num_pkts = 0;
9481 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
9482 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
9483
9484 pkt_prod = fp->tx_pkt_prod++;
9485 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9486 tx_buf->first_bd = fp->tx_bd_prod;
9487 tx_buf->skb = skb;
9488
9489 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
9490 mapping = pci_map_single(bp->pdev, skb->data,
9491 skb_headlen(skb), PCI_DMA_TODEVICE);
9492 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9493 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9494 tx_bd->nbd = cpu_to_le16(1);
9495 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9496 tx_bd->vlan = cpu_to_le16(pkt_prod);
9497 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
9498 ETH_TX_BD_FLAGS_END_BD);
9499 tx_bd->general_data = ((UNICAST_ADDRESS <<
9500 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9501
9502 wmb();
9503
4781bfad 9504 le16_add_cpu(&fp->hw_tx_prods->bds_prod, 1);
f3c87cdd 9505 mb(); /* FW restriction: must not reorder writing nbd and packets */
4781bfad 9506 le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
0626b899 9507 DOORBELL(bp, fp->index, 0);
9508
9509 mmiowb();
9510
9511 num_pkts++;
9512 fp->tx_bd_prod++;
9513 bp->dev->trans_start = jiffies;
9514
9515 udelay(100);
9516
9517 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
9518 if (tx_idx != tx_start_idx + num_pkts)
9519 goto test_loopback_exit;
9520
9521 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
9522 if (rx_idx != rx_start_idx + num_pkts)
9523 goto test_loopback_exit;
9524
9525 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
9526 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9527 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9528 goto test_loopback_rx_exit;
9529
9530 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9531 if (len != pkt_size)
9532 goto test_loopback_rx_exit;
9533
9534 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
9535 skb = rx_buf->skb;
9536 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9537 for (i = ETH_HLEN; i < pkt_size; i++)
9538 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9539 goto test_loopback_rx_exit;
9540
9541 rc = 0;
9542
9543test_loopback_rx_exit:
9544
9545 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
9546 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
9547 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
9548 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
9549
9550 /* Update producers */
9551 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
9552 fp->rx_sge_prod);
9553
9554test_loopback_exit:
9555 bp->link_params.loopback_mode = LOOPBACK_NONE;
9556
9557 return rc;
9558}
9559
9560static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9561{
b5bf9068 9562 int rc = 0, res;
9563
9564 if (!netif_running(bp->dev))
9565 return BNX2X_LOOPBACK_FAILED;
9566
f8ef6e44 9567 bnx2x_netif_stop(bp, 1);
3910c8ae 9568 bnx2x_acquire_phy_lock(bp);
f3c87cdd 9569
9570 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
9571 if (res) {
9572 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
9573 rc |= BNX2X_PHY_LOOPBACK_FAILED;
9574 }
9575
9576 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
9577 if (res) {
9578 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
9579 rc |= BNX2X_MAC_LOOPBACK_FAILED;
9580 }
9581
3910c8ae 9582 bnx2x_release_phy_lock(bp);
9583 bnx2x_netif_start(bp);
9584
9585 return rc;
9586}
9587
9588#define CRC32_RESIDUAL 0xdebb20e3
9589
9590static int bnx2x_test_nvram(struct bnx2x *bp)
9591{
9592 static const struct {
9593 int offset;
9594 int size;
9595 } nvram_tbl[] = {
9596 { 0, 0x14 }, /* bootstrap */
9597 { 0x14, 0xec }, /* dir */
9598 { 0x100, 0x350 }, /* manuf_info */
9599 { 0x450, 0xf0 }, /* feature_info */
9600 { 0x640, 0x64 }, /* upgrade_key_info */
9601 { 0x6a4, 0x64 },
9602 { 0x708, 0x70 }, /* manuf_key_info */
9603 { 0x778, 0x70 },
9604 { 0, 0 }
9605 };
4781bfad 9606 __be32 buf[0x350 / 4];
9607 u8 *data = (u8 *)buf;
9608 int i, rc;
9609 u32 magic, csum;
9610
9611 rc = bnx2x_nvram_read(bp, 0, data, 4);
9612 if (rc) {
f5372251 9613 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
9614 goto test_nvram_exit;
9615 }
9616
9617 magic = be32_to_cpu(buf[0]);
9618 if (magic != 0x669955aa) {
9619 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9620 rc = -ENODEV;
9621 goto test_nvram_exit;
9622 }
9623
9624 for (i = 0; nvram_tbl[i].size; i++) {
9625
9626 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9627 nvram_tbl[i].size);
9628 if (rc) {
9629 DP(NETIF_MSG_PROBE,
f5372251 9630 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
9631 goto test_nvram_exit;
9632 }
9633
9634 csum = ether_crc_le(nvram_tbl[i].size, data);
9635 if (csum != CRC32_RESIDUAL) {
9636 DP(NETIF_MSG_PROBE,
9637 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9638 rc = -ENODEV;
9639 goto test_nvram_exit;
9640 }
9641 }
9642
9643test_nvram_exit:
9644 return rc;
9645}
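/* The per-region check relies on the CRC-32 residual property:
 * assuming each region stores its own CRC after the data, running
 * ether_crc_le() over data plus stored CRC must yield the constant
 * residual 0xdebb20e3 (CRC32_RESIDUAL above); any corruption of the
 * region breaks this invariant.
 */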
9646
9647static int bnx2x_test_intr(struct bnx2x *bp)
9648{
9649 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9650 int i, rc;
9651
9652 if (!netif_running(bp->dev))
9653 return -ENODEV;
9654
8d9c5f34 9655 config->hdr.length = 0;
9656 if (CHIP_IS_E1(bp))
9657 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9658 else
9659 config->hdr.offset = BP_FUNC(bp);
0626b899 9660 config->hdr.client_id = bp->fp->cl_id;
9661 config->hdr.reserved1 = 0;
9662
9663 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9664 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9665 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9666 if (rc == 0) {
9667 bp->set_mac_pending++;
9668 for (i = 0; i < 10; i++) {
9669 if (!bp->set_mac_pending)
9670 break;
9671 msleep_interruptible(10);
9672 }
9673 if (i == 10)
9674 rc = -ENODEV;
9675 }
9676
9677 return rc;
9678}
9679
9680static void bnx2x_self_test(struct net_device *dev,
9681 struct ethtool_test *etest, u64 *buf)
9682{
9683 struct bnx2x *bp = netdev_priv(dev);
9684
9685 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9686
f3c87cdd 9687 if (!netif_running(dev))
a2fbb9ea 9688 return;
a2fbb9ea 9689
33471629 9690 /* offline tests are not supported in MF mode */
9691 if (IS_E1HMF(bp))
9692 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9693
9694 if (etest->flags & ETH_TEST_FL_OFFLINE) {
9695 u8 link_up;
9696
9697 link_up = bp->link_vars.link_up;
9698 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9699 bnx2x_nic_load(bp, LOAD_DIAG);
9700 /* wait until link state is restored */
9701 bnx2x_wait_for_link(bp, link_up);
9702
9703 if (bnx2x_test_registers(bp) != 0) {
9704 buf[0] = 1;
9705 etest->flags |= ETH_TEST_FL_FAILED;
9706 }
9707 if (bnx2x_test_memory(bp) != 0) {
9708 buf[1] = 1;
9709 etest->flags |= ETH_TEST_FL_FAILED;
9710 }
9711 buf[2] = bnx2x_test_loopback(bp, link_up);
9712 if (buf[2] != 0)
9713 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 9714
9715 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9716 bnx2x_nic_load(bp, LOAD_NORMAL);
9717 /* wait until link state is restored */
9718 bnx2x_wait_for_link(bp, link_up);
9719 }
9720 if (bnx2x_test_nvram(bp) != 0) {
9721 buf[3] = 1;
9722 etest->flags |= ETH_TEST_FL_FAILED;
9723 }
9724 if (bnx2x_test_intr(bp) != 0) {
9725 buf[4] = 1;
9726 etest->flags |= ETH_TEST_FL_FAILED;
9727 }
9728 if (bp->port.pmf)
9729 if (bnx2x_link_test(bp) != 0) {
9730 buf[5] = 1;
9731 etest->flags |= ETH_TEST_FL_FAILED;
9732 }
9733
9734#ifdef BNX2X_EXTRA_DEBUG
9735 bnx2x_panic_dump(bp);
9736#endif
9737}
9738
9739static const struct {
9740 long offset;
9741 int size;
9742 u8 string[ETH_GSTRING_LEN];
9743} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
9744/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
9745 { Q_STATS_OFFSET32(error_bytes_received_hi),
9746 8, "[%d]: rx_error_bytes" },
9747 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
9748 8, "[%d]: rx_ucast_packets" },
9749 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
9750 8, "[%d]: rx_mcast_packets" },
9751 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
9752 8, "[%d]: rx_bcast_packets" },
9753 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
9754 { Q_STATS_OFFSET32(rx_err_discard_pkt),
9755 4, "[%d]: rx_phy_ip_err_discards"},
9756 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
9757 4, "[%d]: rx_skb_alloc_discard" },
9758 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
9759
9760/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
9761 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9762 8, "[%d]: tx_packets" }
9763};
9764
9765static const struct {
9766 long offset;
9767 int size;
9768 u32 flags;
9769#define STATS_FLAGS_PORT 1
9770#define STATS_FLAGS_FUNC 2
de832a55 9771#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
66e855f3 9772 u8 string[ETH_GSTRING_LEN];
bb2a0f7a 9773} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9774/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9775 8, STATS_FLAGS_BOTH, "rx_bytes" },
66e855f3 9776 { STATS_OFFSET32(error_bytes_received_hi),
de832a55 9777 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
bb2a0f7a 9778 { STATS_OFFSET32(total_unicast_packets_received_hi),
de832a55 9779 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
bb2a0f7a 9780 { STATS_OFFSET32(total_multicast_packets_received_hi),
de832a55 9781 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
bb2a0f7a 9782 { STATS_OFFSET32(total_broadcast_packets_received_hi),
de832a55 9783 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
bb2a0f7a 9784 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
66e855f3 9785 8, STATS_FLAGS_PORT, "rx_crc_errors" },
bb2a0f7a 9786 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
66e855f3 9787 8, STATS_FLAGS_PORT, "rx_align_errors" },
9788 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9789 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9790 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9791 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9792/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9793 8, STATS_FLAGS_PORT, "rx_fragments" },
9794 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9795 8, STATS_FLAGS_PORT, "rx_jabbers" },
9796 { STATS_OFFSET32(no_buff_discard_hi),
9797 8, STATS_FLAGS_BOTH, "rx_discards" },
9798 { STATS_OFFSET32(mac_filter_discard),
9799 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9800 { STATS_OFFSET32(xxoverflow_discard),
9801 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9802 { STATS_OFFSET32(brb_drop_hi),
9803 8, STATS_FLAGS_PORT, "rx_brb_discard" },
9804 { STATS_OFFSET32(brb_truncate_hi),
9805 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
9806 { STATS_OFFSET32(pause_frames_received_hi),
9807 8, STATS_FLAGS_PORT, "rx_pause_frames" },
9808 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9809 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9810 { STATS_OFFSET32(nig_timer_max),
9811 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
9812/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
9813 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
9814 { STATS_OFFSET32(rx_skb_alloc_failed),
9815 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
9816 { STATS_OFFSET32(hw_csum_err),
9817 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
9818
9819 { STATS_OFFSET32(total_bytes_transmitted_hi),
9820 8, STATS_FLAGS_BOTH, "tx_bytes" },
9821 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9822 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9823 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9824 8, STATS_FLAGS_BOTH, "tx_packets" },
9825 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9826 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9827 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9828 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
bb2a0f7a 9829 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
66e855f3 9830 8, STATS_FLAGS_PORT, "tx_single_collisions" },
bb2a0f7a 9831 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
66e855f3 9832 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
de832a55 9833/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
66e855f3 9834 8, STATS_FLAGS_PORT, "tx_deferred" },
bb2a0f7a 9835 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
66e855f3 9836 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
bb2a0f7a 9837 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
66e855f3 9838 8, STATS_FLAGS_PORT, "tx_late_collisions" },
bb2a0f7a 9839 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
66e855f3 9840 8, STATS_FLAGS_PORT, "tx_total_collisions" },
bb2a0f7a 9841 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
66e855f3 9842 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
bb2a0f7a 9843 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
66e855f3 9844 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
bb2a0f7a 9845 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
66e855f3 9846 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
bb2a0f7a 9847 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
66e855f3 9848 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
bb2a0f7a 9849 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
66e855f3 9850 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
bb2a0f7a 9851 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
66e855f3 9852 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
de832a55 9853/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
66e855f3 9854 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9855 { STATS_OFFSET32(pause_frames_sent_hi),
9856 8, STATS_FLAGS_PORT, "tx_pause_frames" }
9857};
9858
9859#define IS_PORT_STAT(i) \
9860 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
9861#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
9862#define IS_E1HMF_MODE_STAT(bp) \
9863 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
66e855f3 9864
9865static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9866{
bb2a0f7a 9867 struct bnx2x *bp = netdev_priv(dev);
de832a55 9868 int i, j, k;
bb2a0f7a 9869
9870 switch (stringset) {
9871 case ETH_SS_STATS:
9872 if (is_multi(bp)) {
9873 k = 0;
9874 for_each_queue(bp, i) {
9875 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
9876 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
9877 bnx2x_q_stats_arr[j].string, i);
9878 k += BNX2X_NUM_Q_STATS;
9879 }
9880 if (IS_E1HMF_MODE_STAT(bp))
9881 break;
9882 for (j = 0; j < BNX2X_NUM_STATS; j++)
9883 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
9884 bnx2x_stats_arr[j].string);
9885 } else {
9886 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9887 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9888 continue;
9889 strcpy(buf + j*ETH_GSTRING_LEN,
9890 bnx2x_stats_arr[i].string);
9891 j++;
9892 }
bb2a0f7a 9893 }
9894 break;
9895
9896 case ETH_SS_TEST:
9897 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9898 break;
9899 }
9900}
9901
9902static int bnx2x_get_stats_count(struct net_device *dev)
9903{
bb2a0f7a 9904 struct bnx2x *bp = netdev_priv(dev);
de832a55 9905 int i, num_stats;
bb2a0f7a 9906
9907 if (is_multi(bp)) {
9908 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
9909 if (!IS_E1HMF_MODE_STAT(bp))
9910 num_stats += BNX2X_NUM_STATS;
9911 } else {
9912 if (IS_E1HMF_MODE_STAT(bp)) {
9913 num_stats = 0;
9914 for (i = 0; i < BNX2X_NUM_STATS; i++)
9915 if (IS_FUNC_STAT(i))
9916 num_stats++;
9917 } else
9918 num_stats = BNX2X_NUM_STATS;
bb2a0f7a 9919 }
de832a55 9920
bb2a0f7a 9921 return num_stats;
9922}
9923
9924static void bnx2x_get_ethtool_stats(struct net_device *dev,
9925 struct ethtool_stats *stats, u64 *buf)
9926{
9927 struct bnx2x *bp = netdev_priv(dev);
9928 u32 *hw_stats, *offset;
9929 int i, j, k;
bb2a0f7a 9930
9931 if (is_multi(bp)) {
9932 k = 0;
9933 for_each_queue(bp, i) {
9934 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
9935 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
9936 if (bnx2x_q_stats_arr[j].size == 0) {
9937 /* skip this counter */
9938 buf[k + j] = 0;
9939 continue;
9940 }
9941 offset = (hw_stats +
9942 bnx2x_q_stats_arr[j].offset);
9943 if (bnx2x_q_stats_arr[j].size == 4) {
9944 /* 4-byte counter */
9945 buf[k + j] = (u64) *offset;
9946 continue;
9947 }
9948 /* 8-byte counter */
9949 buf[k + j] = HILO_U64(*offset, *(offset + 1));
9950 }
9951 k += BNX2X_NUM_Q_STATS;
9952 }
9953 if (IS_E1HMF_MODE_STAT(bp))
9954 return;
9955 hw_stats = (u32 *)&bp->eth_stats;
9956 for (j = 0; j < BNX2X_NUM_STATS; j++) {
9957 if (bnx2x_stats_arr[j].size == 0) {
9958 /* skip this counter */
9959 buf[k + j] = 0;
9960 continue;
9961 }
9962 offset = (hw_stats + bnx2x_stats_arr[j].offset);
9963 if (bnx2x_stats_arr[j].size == 4) {
9964 /* 4-byte counter */
9965 buf[k + j] = (u64) *offset;
9966 continue;
9967 }
9968 /* 8-byte counter */
9969 buf[k + j] = HILO_U64(*offset, *(offset + 1));
a2fbb9ea 9970 }
9971 } else {
9972 hw_stats = (u32 *)&bp->eth_stats;
9973 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9974 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9975 continue;
9976 if (bnx2x_stats_arr[i].size == 0) {
9977 /* skip this counter */
9978 buf[j] = 0;
9979 j++;
9980 continue;
9981 }
9982 offset = (hw_stats + bnx2x_stats_arr[i].offset);
9983 if (bnx2x_stats_arr[i].size == 4) {
9984 /* 4-byte counter */
9985 buf[j] = (u64) *offset;
9986 j++;
9987 continue;
9988 }
9989 /* 8-byte counter */
9990 buf[j] = HILO_U64(*offset, *(offset + 1));
bb2a0f7a 9991 j++;
a2fbb9ea 9992 }
9993 }
9994}
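/* All counters are exported to ethtool as u64: 4-byte statistics are
 * zero-extended, while 8-byte statistics are kept as {hi, lo} dword
 * pairs in the stats structures and reassembled here with
 * HILO_U64(*offset, *(offset + 1)).
 */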
9995
9996static int bnx2x_phys_id(struct net_device *dev, u32 data)
9997{
9998 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9999 int port = BP_PORT(bp);
10000 int i;
10001
10002 if (!netif_running(dev))
10003 return 0;
10004
10005 if (!bp->port.pmf)
10006 return 0;
10007
10008 if (data == 0)
10009 data = 2;
10010
10011 for (i = 0; i < (data * 2); i++) {
c18487ee 10012 if ((i % 2) == 0)
34f80b04 10013 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
10014 bp->link_params.hw_led_mode,
10015 bp->link_params.chip_id);
10016 else
34f80b04 10017 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
10018 bp->link_params.hw_led_mode,
10019 bp->link_params.chip_id);
10020
10021 msleep_interruptible(500);
10022 if (signal_pending(current))
10023 break;
10024 }
10025
c18487ee 10026 if (bp->link_vars.link_up)
34f80b04 10027 bnx2x_set_led(bp, port, LED_MODE_OPER,
10028 bp->link_vars.line_speed,
10029 bp->link_params.hw_led_mode,
10030 bp->link_params.chip_id);
10031
10032 return 0;
10033}
10034
10035static struct ethtool_ops bnx2x_ethtool_ops = {
10036 .get_settings = bnx2x_get_settings,
10037 .set_settings = bnx2x_set_settings,
10038 .get_drvinfo = bnx2x_get_drvinfo,
10039 .get_regs_len = bnx2x_get_regs_len,
10040 .get_regs = bnx2x_get_regs,
10041 .get_wol = bnx2x_get_wol,
10042 .set_wol = bnx2x_set_wol,
10043 .get_msglevel = bnx2x_get_msglevel,
10044 .set_msglevel = bnx2x_set_msglevel,
10045 .nway_reset = bnx2x_nway_reset,
01e53298 10046 .get_link = bnx2x_get_link,
10047 .get_eeprom_len = bnx2x_get_eeprom_len,
10048 .get_eeprom = bnx2x_get_eeprom,
10049 .set_eeprom = bnx2x_set_eeprom,
10050 .get_coalesce = bnx2x_get_coalesce,
10051 .set_coalesce = bnx2x_set_coalesce,
10052 .get_ringparam = bnx2x_get_ringparam,
10053 .set_ringparam = bnx2x_set_ringparam,
10054 .get_pauseparam = bnx2x_get_pauseparam,
10055 .set_pauseparam = bnx2x_set_pauseparam,
10056 .get_rx_csum = bnx2x_get_rx_csum,
10057 .set_rx_csum = bnx2x_set_rx_csum,
10058 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 10059 .set_tx_csum = ethtool_op_set_tx_hw_csum,
10060 .set_flags = bnx2x_set_flags,
10061 .get_flags = ethtool_op_get_flags,
10062 .get_sg = ethtool_op_get_sg,
10063 .set_sg = ethtool_op_set_sg,
10064 .get_tso = ethtool_op_get_tso,
10065 .set_tso = bnx2x_set_tso,
10066 .self_test_count = bnx2x_self_test_count,
10067 .self_test = bnx2x_self_test,
10068 .get_strings = bnx2x_get_strings,
10069 .phys_id = bnx2x_phys_id,
10070 .get_stats_count = bnx2x_get_stats_count,
bb2a0f7a 10071 .get_ethtool_stats = bnx2x_get_ethtool_stats,
10072};
10073
10074/* end of ethtool_ops */
10075
10076/****************************************************************************
10077* General service functions
10078****************************************************************************/
10079
10080static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10081{
10082 u16 pmcsr;
10083
10084 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10085
10086 switch (state) {
10087 case PCI_D0:
34f80b04 10088 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10089 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10090 PCI_PM_CTRL_PME_STATUS));
10091
10092 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
33471629 10093 /* delay required during transition out of D3hot */
a2fbb9ea 10094 msleep(20);
34f80b04 10095 break;
a2fbb9ea 10096
10097 case PCI_D3hot:
10098 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10099 pmcsr |= 3;
a2fbb9ea 10100
10101 if (bp->wol)
10102 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 10103
10104 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10105 pmcsr);
a2fbb9ea 10106
10107 /* No more memory access after this point until
10108 * device is brought back to D0.
10109 */
10110 break;
10111
10112 default:
10113 return -EINVAL;
10114 }
10115 return 0;
10116}
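/* Writing 3 into the PM control STATE field above selects D3hot;
 * PME generation is armed only when WoL is enabled. Once in D3hot
 * the device must not be accessed again until it is brought back
 * to D0.
 */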
10117
10118static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10119{
10120 u16 rx_cons_sb;
10121
10122 /* Tell compiler that status block fields can change */
10123 barrier();
10124 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10125 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10126 rx_cons_sb++;
10127 return (fp->rx_comp_cons != rx_cons_sb);
10128}
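/* The MAX_RCQ_DESC_CNT adjustment above appears to skip the last
 * entry of each completion-queue page, which is reserved as a
 * next-page pointer rather than a real completion, keeping the
 * software index in step with what the hardware produces.
 */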
10129
10130/*
10131 * net_device service functions
10132 */
10133
10134static int bnx2x_poll(struct napi_struct *napi, int budget)
10135{
10136 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10137 napi);
10138 struct bnx2x *bp = fp->bp;
10139 int work_done = 0;
10140
10141#ifdef BNX2X_STOP_ON_ERROR
10142 if (unlikely(bp->panic))
34f80b04 10143 goto poll_panic;
10144#endif
10145
10146 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
10147 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10148 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10149
10150 bnx2x_update_fpsb_idx(fp);
10151
237907c1 10152 if (bnx2x_has_tx_work(fp))
7961f791 10153 bnx2x_tx_int(fp);
a2fbb9ea 10154
8534f32c 10155 if (bnx2x_has_rx_work(fp)) {
a2fbb9ea 10156 work_done = bnx2x_rx_int(fp, budget);
356e2385 10157
8534f32c
EG
10158 /* must not complete if we consumed full budget */
10159 if (work_done >= budget)
10160 goto poll_again;
10161 }
a2fbb9ea 10162
8534f32c
EG
10163 /* BNX2X_HAS_WORK() reads the status block, thus we need to
10164 * ensure that status block indices have been actually read
10165 * (bnx2x_update_fpsb_idx) prior to this check (BNX2X_HAS_WORK)
10166 * so that we won't write the "newer" value of the status block to IGU
10167 * (if there was a DMA right after BNX2X_HAS_WORK and
10168 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
10169 * may be postponed to right before bnx2x_ack_sb). In this case
10170 * there will never be another interrupt until there is another update
10171 * of the status block, while there is still unhandled work.
10172 */
10173 rmb();
a2fbb9ea 10174
8534f32c 10175 if (!BNX2X_HAS_WORK(fp)) {
a2fbb9ea 10176#ifdef BNX2X_STOP_ON_ERROR
34f80b04 10177poll_panic:
a2fbb9ea 10178#endif
288379f0 10179 napi_complete(napi);
a2fbb9ea 10180
0626b899 10181 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
a2fbb9ea 10182 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
0626b899 10183 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
a2fbb9ea
ET
10184 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10185 }
356e2385 10186
8534f32c 10187poll_again:
a2fbb9ea
ET
10188 return work_done;
10189}
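
/* NAPI contract as followed by bnx2x_poll() above: returning with
 * work_done == budget (via poll_again) keeps this instance scheduled and
 * defers the status-block ack; only once all work is drained does it call
 * napi_complete() and ack the status block with IGU_INT_ENABLE so the
 * hardware may interrupt again.
 */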

/* We split the first BD into header and data BDs
 * to ease the pain of our fellow microcode engineers;
 * we use one mapping for both BDs.
 * So far this has only been observed to happen
 * in Other Operating Systems(TM).
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct eth_tx_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod];

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
	d_tx_bd->vlan = 0;
	/* this marks the BD as one that has no individual mapping;
	 * the FW ignores this flag in a BD not marked start
	 */
	d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd for marking the last BD flag */
	*tx_bd = d_tx_bd;

	return bd_prod;
}

static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
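
/* Illustrative reading of bnx2x_csum_fix(): the hardware checksum in
 * SKB_CS(skb) was computed starting "fix" bytes away from the transport
 * header. For fix > 0 the partial sum of those extra leading bytes is
 * subtracted out (csum_sub); for fix < 0 the missing bytes are added back
 * in (csum_add). The result is folded to 16 bits, complemented, and
 * byte-swapped (swab16) into the order the parsing BD expects.
 */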

static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == htons(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= XMIT_GSO_V4;

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= XMIT_GSO_V6;

	return rc;
}
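
/* Usage example (illustrative): a CHECKSUM_PARTIAL IPv4 TCP skb with GSO
 * enabled yields XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4, while a
 * non-offloaded skb yields XMIT_PLAIN and the whole parsing-BD path in
 * bnx2x_start_xmit() is skipped.
 */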

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented);
   no need to check fragmentation if page size > 8K (there will be no
   violation to FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB */
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
				  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d hlen %d first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
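
/* Worked example of the window check above (illustrative, assuming
 * MAX_FETCH_BD is 13): the FW can fetch at most wnd_size = 10 BDs per MSS
 * worth of data, so for an LSO skb with 12 frags every run of 10
 * consecutive BDs (linear part included) must cover at least lso_mss
 * bytes; if any window falls short, the skb is linearized before
 * transmission.
 */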

/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int vlan_off = (bp->e1hov ? 4 : 0);
	int i;
	u8 hlen = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
	   " gso type %x xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	 * Please read carefully. First we use one BD which we mark as start,
	 * then for TSO or xsum we have a parsing info BD,
	 * and only then we have the rest of the TSO BDs.
	 * (Don't forget to mark the last one as last,
	 * and to unmap only AFTER you write to the BD ...)
	 * And above all, all pbd sizes are in words - NOT DWORDS!
	 */

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_bd = &fp->tx_desc_ring[bd_prod];

	tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_bd->general_data = (UNICAST_ADDRESS <<
			       ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p next_idx %u bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);

#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
		vlan_off += 4;
	} else
#endif
		tx_bd->vlan = cpu_to_le16(pkt_prod);

	if (xmit_type) {
		/* turn on parsing and get a BD */
		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		pbd = (void *)&fp->tx_desc_ring[bd_prod];

		memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
	}

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data =
			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2 - vlan_off;

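		/* Example (illustrative): an untagged IPv4/TCP frame with no
		 * options gives hlen = 14/2 = 7 words for the MAC header,
		 * ip_hlen = 20/2 = 10 words, plus 20/2 = 10 words of TCP
		 * header, so total_hlen = 27 words (54 bytes), matching the
		 * "sizes are in words" rule noted above.
		 */
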
		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
			pbd->cs_offset = fix / 2;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d offset %d fix %d csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
			   SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
	tx_bd->nbd = cpu_to_le16(nbd);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));

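	/* Example (illustrative): a packet with 3 frags and a parsing BD
	 * uses nbd = 3 + 2 (first BD plus PBD); a TSO header split below
	 * adds one more via ++nbd.
	 */
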
	DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
	   " nbytes %d flags %x vlan %x\n",
	   tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
	   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
	   le16_to_cpu(tx_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d hlen %d total len %d tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
						 bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_bd = &fp->tx_desc_ring[bd_prod];

		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_bd->nbytes = cpu_to_le16(frag->size);
		tx_bd->vlan = cpu_to_le16(pkt_prod);
		tx_bd->bd_flags.as_bitfield = 0;

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
		   i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
		   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
	}

	/* now at last mark the BD as the last BD */
	tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
	   tx_bd, tx_bd->bd_flags.as_bitfield);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

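	/* Example (illustrative): the last BD slot of each page holds a
	 * "next page" pointer, so if this packet's BDs wrapped a page
	 * boundary, TX_BD_POFF(bd_prod) is smaller than nbd and the pointer
	 * BD is counted in the doorbell as well.
	 */
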
	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
		   " tcp_flags %x xsum %x seq %u hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes packets must have BDs.
	 */
	wmb();

	le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
	DOORBELL(bp, fp->index, 0);

	mmiowb();

	fp->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
		   if we put Tx into XOFF state. */
		smp_mb();
		netif_tx_stop_queue(txq);
		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}

/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

/* called with netif_tx_lock from dev_mcast.c */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].
					target_table_entry.client_id = 0;
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}
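
			/* The loop above builds a classic hash filter
			 * (illustrative, assuming MC_HASH_SIZE == 8, i.e.
			 * 256 bits): the top byte of the CRC32c of the MAC
			 * selects one of 256 filter bits, regidx = bit >> 5
			 * picks the 32-bit register and bit & 0x1f the bit
			 * within it.
			 */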

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}

/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	return 0;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->port.phy_addr;

		/* fallthrough */

	case SIOCGMIIREG: {
		u16 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
				      DEFAULT_PHY_DEV_ADDR,
				      (data->reg_num & 0x1f), &mii_regval);
		data->val_out = mii_regval;
		mutex_unlock(&bp->port.phy_mutex);
		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
				       DEFAULT_PHY_DEV_ADDR,
				       (data->reg_num & 0x1f), data->val_in);
		mutex_unlock(&bp->port.phy_mutex);
		return err;

	default:
		/* do nothing */
		break;
	}

	return -EOPNOTSUPP;
}

/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}

#endif

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};

static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
#endif
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}

/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}
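
/* Illustrative decode of the two helpers above: PCICFG_LINK_CONTROL carries
 * the negotiated link state, so a width field of 8 means PCI-E x8, and a
 * speed field of 1 or 2 maps to 2.5GHz (Gen1) or 5GHz (Gen2) respectively,
 * as consumed by the probe-time printk in bnx2x_init_one().
 */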
static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u16 *ops_offsets;
	u32 offset, len, num_ops;
	int i;
	const struct firmware *firmware = bp->firmware;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			printk(KERN_ERR PFX "Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			printk(KERN_ERR PFX "Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
		       " Should be %d.%d.%d.%d\n",
		       fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
		       BCM_5710_FW_MAJOR_VERSION,
		       BCM_5710_FW_MINOR_VERSION,
		       BCM_5710_FW_REVISION_VERSION,
		       BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}

static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	u32 i;
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/*
 * Ops array is stored in the following format:
 * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	u32 i, j, tmp;
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j+1]);
	}
}
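
/* Example (illustrative): the 8-byte record be32(0x020000ac), be32(0x1234)
 * decodes to op = 0x02, offset = 0x0000ac, raw_data = 0x1234 - one
 * struct raw_op per pair of big-endian words.
 */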

static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	u32 i;
	u16 *target = (u16 *)_target;
	const __be16 *source = (const __be16 *)_source;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}

#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
	do { \
		u32 len = be32_to_cpu(fw_hdr->arr.len); \
		bp->arr = kmalloc(len, GFP_KERNEL); \
		if (!bp->arr) { \
			printk(KERN_ERR PFX "Failed to allocate %d bytes for "#arr"\n", len); \
			goto lbl; \
		} \
		func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
		     (u8 *)bp->arr, len); \
	} while (0)
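
/* Illustrative expansion of the macro above:
 * BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n)
 * allocates bp->init_data with the length taken from fw_hdr->init_data.len,
 * byte-swaps the blob out of the firmware image into it, and jumps to
 * request_firmware_exit on allocation failure.
 */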

static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	char fw_file_name[40] = {0};
	int rc, offset;
	struct bnx2x_fw_file_hdr *fw_hdr;

	/* Create a FW file name */
	if (CHIP_IS_E1(bp))
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
	else
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);

	sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
		BCM_5710_FW_MAJOR_VERSION,
		BCM_5710_FW_MINOR_VERSION,
		BCM_5710_FW_REVISION_VERSION,
		BCM_5710_FW_ENGINEERING_VERSION);

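	/* Example (illustrative): for an E1 chip with FW version macros of,
	 * say, 4.8.53.0, this builds "bnx2x-e1-4.8.53.0.fw", which
	 * request_firmware() then looks up in the firmware search path.
	 */
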
	printk(KERN_INFO PFX "Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		printk(KERN_ERR PFX "Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, be16_to_cpu_n);

	/* STORMs firmware */
	bp->tsem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	bp->tsem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	bp->usem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	bp->usem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->usem_pram_data.offset);
	bp->xsem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	bp->xsem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	bp->csem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	bp->csem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}

static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		printk(KERN_ERR PFX "Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int ret;

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		printk(KERN_ERR PFX "Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);