/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>


#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.48.102"
#define DRV_MODULE_RELDATE	"2009/02/12"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
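
/*
 * A note on the two helpers above (inferred from the code, not from
 * external docs): they implement indirect GRC access through PCI
 * config space.  PCICFG_GRC_ADDRESS selects the target register,
 * PCICFG_GRC_DATA carries the data, and the final write of
 * PCICFG_VENDOR_ID_OFFSET parks the window on a harmless offset.
 * A hypothetical init-time caller might do:
 *
 *	u32 val = bnx2x_reg_rd_ind(bp, reg);
 *	bnx2x_reg_wr_ind(bp, reg, val | some_bit);
 *
 * The config window is a single shared resource, which is why these
 * are only safe where the MCP serializes access (init time).
 */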

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
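
/*
 * Worked example (assuming, as dmae_reg_go_c[] suggests, 16 command
 * slots): posting command idx writes the command image dword by dword
 * starting at
 *
 *	DMAE_REG_CMD_MEM + idx * sizeof(struct dmae_command)
 *
 * and then rings the matching doorbell with
 * REG_WR(bp, dmae_reg_go_c[idx], 1).  Slots never overlap, so
 * independent channels can be posted without locking as long as each
 * idx has a single owner.
 */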

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
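
/*
 * Completion protocol, as implied by the code above: comp_addr points
 * at the wb_comp dword in the slowpath area, the driver zeroes it,
 * posts the command and polls until the engine writes DMAE_COMP_VAL
 * there.  With cnt = 200 and 5 us per iteration that is roughly a 1 ms
 * timeout on real silicon (and 200 * 100 ms on emulation/FPGA).
 */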

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif
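
/*
 * "Wide-bus" registers are 64 bits wide and travel as two dwords in a
 * single DMAE transaction, high dword first, which is why bnx2x_wb_wr()
 * packs val_hi into wb_write[0].  A reader reassembles the value the
 * same way bnx2x_wb_rd() does:
 *
 *	u64 v = HILO_U64(wb_data[0], wb_data[1]);
 */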

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
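
/*
 * Layout note, inferred from the loop above: each STORM keeps its
 * assert list as 16-byte rows in internal memory, so entry i spans
 * *_ASSERT_LIST_OFFSET(i) + 0/4/8/12 (row0..row3).  Scanning stops at
 * the first row0 equal to COMMON_ASM_INVALID_ASSERT_OPCODE, and rc
 * counts the valid asserts printed across all four STORMs.
 */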

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("queue[%d]: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
			  " bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");

}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}
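
/*
 * Packing sketch, based on the shifts used above: the ack is a single
 * 32-bit value, status_block_index plus sb_id_and_flags built from
 *
 *	sb_id  << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT
 *	storm  << IGU_ACK_REGISTER_STORM_ID_SHIFT
 *	update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT
 *	op     << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT
 *
 * so acking an SB index and changing the interrupt mode costs exactly
 * one PCI write to the HC command register.
 */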

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
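
/*
 * Accounting sketch (an inference from the code above, not a spec):
 * the first BD carries the packet's BD count in tx_bd->nbd and is the
 * only one mapped with pci_map_single(); parse and TSO split-header
 * BDs own no DMA mapping and are merely stepped over, while each
 * remaining frag BD is released with pci_unmap_page().  The returned
 * new_cons = first_bd + nbd becomes the new BD consumer in
 * bnx2x_tx_int().
 */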

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
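
/*
 * Worked example with made-up numbers: if tx_ring_size were 4096 with
 * NUM_TX_RINGS = 16 next-page entries, prod = 100 and cons = 40, then
 * used = 60 + 16 = 76 and 4096 - 76 = 4020 BDs are reported free.
 * Counting the next-page entries as permanently "used" keeps the
 * threshold in bnx2x_tx_int() conservative, and SUB_S16() keeps the
 * subtraction correct across u16 wrap-around.
 */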

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		__netif_tx_lock(txq, smp_processor_id());

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped().  Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}


static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
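
/*
 * Bitmask walk, sketched with hypothetical numbers: if each sge_mask
 * element covered 64 SGEs and a CQE consumed SGEs 0..127, the loop
 * above would find the first two elements fully cleared, reset each to
 * RX_SGE_MASK_ELEM_ONE_MASK, and advance delta by RX_SGE_MASK_ELEM_SZ
 * twice, moving rx_sge_prod forward by 128 in one step.  Stopping at
 * the first element with bits still set guarantees the producer never
 * overtakes a page the FW may still be writing.
 */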

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return -EINVAL;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = fp->index;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	napi_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}


	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}
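
/*
 * Status word decoding, as implied by the masks above: bit 0 of the
 * value returned by bnx2x_ack_int() belongs to the default (slowpath)
 * status block and bit (1 + sb_id) to a fastpath SB, hence the
 * "0x2 << bp->fp[0].sb_id" mask for queue 0 and the "status & 0x1"
 * test that schedules the slowpath task.  Anything still set after
 * both checks is logged as unknown.
 */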
1750
c18487ee 1751/* end of fast path */
a2fbb9ea 1752
bb2a0f7a 1753static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1754
c18487ee
YR
1755/* Link */
1756
1757/*
1758 * General service functions
1759 */
a2fbb9ea 1760
4a37fb66 1761static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1762{
1763 u32 lock_status;
1764 u32 resource_bit = (1 << resource);
4a37fb66
YG
1765 int func = BP_FUNC(bp);
1766 u32 hw_lock_control_reg;
c18487ee 1767 int cnt;
a2fbb9ea 1768
c18487ee
YR
1769 /* Validating that the resource is within range */
1770 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1771 DP(NETIF_MSG_HW,
1772 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1773 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1774 return -EINVAL;
1775 }
a2fbb9ea 1776
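	/* each function has its own 8-byte-spaced DRIVER_CONTROL register:
	 functions 0-5 off MISC_REG_DRIVER_CONTROL_1,
	 functions 6-7 off MISC_REG_DRIVER_CONTROL_7 */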
4a37fb66
YG
1777 if (func <= 5) {
1778 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1779 } else {
1780 hw_lock_control_reg =
1781 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1782 }
1783
c18487ee 1784 /* Validating that the resource is not already taken */
4a37fb66 1785 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1786 if (lock_status & resource_bit) {
1787 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1788 lock_status, resource_bit);
1789 return -EEXIST;
1790 }
a2fbb9ea 1791
46230476
EG
1792 /* Try for 5 seconds, checking every 5ms */
1793 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1794 /* Try to acquire the lock */
4a37fb66
YG
1795 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1796 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1797 if (lock_status & resource_bit)
1798 return 0;
a2fbb9ea 1799
c18487ee 1800 msleep(5);
a2fbb9ea 1801 }
c18487ee
YR
1802 DP(NETIF_MSG_HW, "Timeout\n");
1803 return -EAGAIN;
1804}
a2fbb9ea 1805
4a37fb66 1806static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1807{
1808 u32 lock_status;
1809 u32 resource_bit = (1 << resource);
4a37fb66
YG
1810 int func = BP_FUNC(bp);
1811 u32 hw_lock_control_reg;
a2fbb9ea 1812
c18487ee
YR
1813 /* Validating that the resource is within range */
1814 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1815 DP(NETIF_MSG_HW,
1816 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1817 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1818 return -EINVAL;
1819 }
1820
4a37fb66
YG
1821 if (func <= 5) {
1822 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1823 } else {
1824 hw_lock_control_reg =
1825 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1826 }
1827
c18487ee 1828 /* Validating that the resource is currently taken */
4a37fb66 1829 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1830 if (!(lock_status & resource_bit)) {
1831 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1832 lock_status, resource_bit);
1833 return -EFAULT;
a2fbb9ea
ET
1834 }
1835
4a37fb66 1836 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1837 return 0;
1838}
1839
1840/* HW Lock for shared dual port PHYs */
4a37fb66 1841static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee 1842{
34f80b04 1843 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1844
46c6a674
EG
1845 if (bp->port.need_hw_lock)
1846 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
c18487ee 1847}
a2fbb9ea 1848
4a37fb66 1849static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee 1850{
46c6a674
EG
1851 if (bp->port.need_hw_lock)
1852 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
a2fbb9ea 1853
34f80b04 1854 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1855}
a2fbb9ea 1856
4acac6a5
EG
1857int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1858{
1859 /* The GPIO should be swapped if swap register is set and active */
1860 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1861 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1862 int gpio_shift = gpio_num +
1863 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1864 u32 gpio_mask = (1 << gpio_shift);
1865 u32 gpio_reg;
1866 int value;
1867
1868 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1869 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1870 return -EINVAL;
1871 }
1872
1873 /* read GPIO value */
1874 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1875
1876 /* get the requested pin value */
1877 if ((gpio_reg & gpio_mask) == gpio_mask)
1878 value = 1;
1879 else
1880 value = 0;
1881
1882 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1883
1884 return value;
1885}
1886
17de50b7 1887int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
1888{
1889 /* The GPIO should be swapped if swap register is set and active */
1890 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1891 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
1892 int gpio_shift = gpio_num +
1893 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1894 u32 gpio_mask = (1 << gpio_shift);
1895 u32 gpio_reg;
a2fbb9ea 1896
c18487ee
YR
1897 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1898 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1899 return -EINVAL;
1900 }
a2fbb9ea 1901
4a37fb66 1902 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
1903 /* read GPIO and mask except the float bits */
1904 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1905
c18487ee
YR
1906 switch (mode) {
1907 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1908 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1909 gpio_num, gpio_shift);
1910 /* clear FLOAT and set CLR */
1911 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1912 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1913 break;
a2fbb9ea 1914
c18487ee
YR
1915 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1916 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1917 gpio_num, gpio_shift);
1918 /* clear FLOAT and set SET */
1919 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1920 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1921 break;
a2fbb9ea 1922
17de50b7 1923 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
1924 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1925 gpio_num, gpio_shift);
1926 /* set FLOAT */
1927 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1928 break;
a2fbb9ea 1929
c18487ee
YR
1930 default:
1931 break;
a2fbb9ea
ET
1932 }
1933
c18487ee 1934 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 1935 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 1936
c18487ee 1937 return 0;
a2fbb9ea
ET
1938}
1939
4acac6a5
EG
1940int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1941{
1942 /* The GPIO should be swapped if swap register is set and active */
1943 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1944 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1945 int gpio_shift = gpio_num +
1946 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1947 u32 gpio_mask = (1 << gpio_shift);
1948 u32 gpio_reg;
1949
1950 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1951 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1952 return -EINVAL;
1953 }
1954
1955 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1956 /* read GPIO int */
1957 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1958
1959 switch (mode) {
1960 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1961 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1962 "output low\n", gpio_num, gpio_shift);
1963 /* clear SET and set CLR */
1964 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1965 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1966 break;
1967
1968 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1969 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1970 "output high\n", gpio_num, gpio_shift);
1971 /* clear CLR and set SET */
1972 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1973 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1974 break;
1975
1976 default:
1977 break;
1978 }
1979
1980 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1981 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1982
1983 return 0;
1984}
1985
c18487ee 1986static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 1987{
c18487ee
YR
1988 u32 spio_mask = (1 << spio_num);
1989 u32 spio_reg;
a2fbb9ea 1990
c18487ee
YR
1991 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1992 (spio_num > MISC_REGISTERS_SPIO_7)) {
1993 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1994 return -EINVAL;
a2fbb9ea
ET
1995 }
1996
4a37fb66 1997 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
1998 /* read SPIO and mask except the float bits */
1999 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 2000
c18487ee 2001 switch (mode) {
6378c025 2002 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
2003 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2004 /* clear FLOAT and set CLR */
2005 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2006 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2007 break;
a2fbb9ea 2008
6378c025 2009 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
2010 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2011 /* clear FLOAT and set SET */
2012 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2013 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2014 break;
a2fbb9ea 2015
c18487ee
YR
2016 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2017 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2018 /* set FLOAT */
2019 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2020 break;
a2fbb9ea 2021
c18487ee
YR
2022 default:
2023 break;
a2fbb9ea
ET
2024 }
2025
c18487ee 2026 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 2027 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 2028
a2fbb9ea
ET
2029 return 0;
2030}
2031
c18487ee 2032static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 2033{
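	/* translate the negotiated IEEE pause advertisement bits
	 into ethtool ADVERTISED_* flags */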
ad33ea3a
EG
2034 switch (bp->link_vars.ieee_fc &
2035 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 2036 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 2037 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2038 ADVERTISED_Pause);
2039 break;
356e2385 2040
c18487ee 2041 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 2042 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
2043 ADVERTISED_Pause);
2044 break;
356e2385 2045
c18487ee 2046 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 2047 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee 2048 break;
356e2385 2049
c18487ee 2050 default:
34f80b04 2051 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2052 ADVERTISED_Pause);
2053 break;
2054 }
2055}
f1410647 2056
c18487ee
YR
2057static void bnx2x_link_report(struct bnx2x *bp)
2058{
2059 if (bp->link_vars.link_up) {
2060 if (bp->state == BNX2X_STATE_OPEN)
2061 netif_carrier_on(bp->dev);
2062 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 2063
c18487ee 2064 printk("%d Mbps ", bp->link_vars.line_speed);
f1410647 2065
c18487ee
YR
2066 if (bp->link_vars.duplex == DUPLEX_FULL)
2067 printk("full duplex");
2068 else
2069 printk("half duplex");
f1410647 2070
c0700f90
DM
2071 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2072 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
c18487ee 2073 printk(", receive ");
356e2385
EG
2074 if (bp->link_vars.flow_ctrl &
2075 BNX2X_FLOW_CTRL_TX)
c18487ee
YR
2076 printk("& transmit ");
2077 } else {
2078 printk(", transmit ");
2079 }
2080 printk("flow control ON");
2081 }
2082 printk("\n");
f1410647 2083
c18487ee
YR
2084 } else { /* link_down */
2085 netif_carrier_off(bp->dev);
2086 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 2087 }
c18487ee
YR
2088}
2089
b5bf9068 2090static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 2091{
19680c48
EG
2092 if (!BP_NOMCP(bp)) {
2093 u8 rc;
a2fbb9ea 2094
19680c48 2095 /* Initialize link parameters structure variables */
8c99e7b0
YR
2096 /* It is recommended to turn off RX FC for jumbo frames
2097 for better performance */
2098 if (IS_E1HMF(bp))
c0700f90 2099 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
8c99e7b0 2100 else if (bp->dev->mtu > 5000)
c0700f90 2101 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 2102 else
c0700f90 2103 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 2104
4a37fb66 2105 bnx2x_acquire_phy_lock(bp);
b5bf9068
EG
2106
2107 if (load_mode == LOAD_DIAG)
2108 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2109
19680c48 2110 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 2111
4a37fb66 2112 bnx2x_release_phy_lock(bp);
a2fbb9ea 2113
3c96c68b
EG
2114 bnx2x_calc_fc_adv(bp);
2115
b5bf9068
EG
2116 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2117 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 2118 bnx2x_link_report(bp);
b5bf9068 2119 }
34f80b04 2120
19680c48
EG
2121 return rc;
2122 }
f5372251 2123 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
19680c48 2124 return -EINVAL;
a2fbb9ea
ET
2125}
2126
c18487ee 2127static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2128{
19680c48 2129 if (!BP_NOMCP(bp)) {
4a37fb66 2130 bnx2x_acquire_phy_lock(bp);
19680c48 2131 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2132 bnx2x_release_phy_lock(bp);
a2fbb9ea 2133
19680c48
EG
2134 bnx2x_calc_fc_adv(bp);
2135 } else
f5372251 2136 BNX2X_ERR("Bootcode is missing - can not set link\n");
c18487ee 2137}
a2fbb9ea 2138
c18487ee
YR
2139static void bnx2x__link_reset(struct bnx2x *bp)
2140{
19680c48 2141 if (!BP_NOMCP(bp)) {
4a37fb66 2142 bnx2x_acquire_phy_lock(bp);
589abe3a 2143 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 2144 bnx2x_release_phy_lock(bp);
19680c48 2145 } else
f5372251 2146 BNX2X_ERR("Bootcode is missing - can not reset link\n");
c18487ee 2147}
a2fbb9ea 2148
c18487ee
YR
2149static u8 bnx2x_link_test(struct bnx2x *bp)
2150{
2151 u8 rc;
a2fbb9ea 2152
4a37fb66 2153 bnx2x_acquire_phy_lock(bp);
c18487ee 2154 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 2155 bnx2x_release_phy_lock(bp);
a2fbb9ea 2156
c18487ee
YR
2157 return rc;
2158}
a2fbb9ea 2159
8a1c38d1 2160static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 2161{
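	/* line_speed is in Mbps, so line_speed/8 is bytes per microsecond */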
8a1c38d1
EG
2162 u32 r_param = bp->link_vars.line_speed / 8;
2163 u32 fair_periodic_timeout_usec;
2164 u32 t_fair;
34f80b04 2165
8a1c38d1
EG
2166 memset(&(bp->cmng.rs_vars), 0,
2167 sizeof(struct rate_shaping_vars_per_port));
2168 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 2169
8a1c38d1
EG
2170 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2171 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 2172
8a1c38d1
EG
2173 /* this is the threshold below which no timer arming will occur;
2174 the 1.25 coefficient makes the threshold a little bigger
2175 than the real time, to compensate for timer inaccuracy */
2176 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
2177 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2178
8a1c38d1
EG
2179 /* resolution of fairness timer */
2180 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2181 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2182 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 2183
8a1c38d1
EG
2184 /* this is the threshold below which we won't arm the timer anymore */
2185 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 2186
8a1c38d1
EG
2187 /* we multiply by 1e3/8 to get bytes/msec.
2188 We don't want the credit to exceed
2189 t_fair*FAIR_MEM (the algorithm resolution) */
2190 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2191 /* since each tick is 4 usec */
2192 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
2193}
2194
8a1c38d1 2195static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
34f80b04
EG
2196{
2197 struct rate_shaping_vars_per_vn m_rs_vn;
2198 struct fairness_vars_per_vn m_fair_vn;
2199 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2200 u16 vn_min_rate, vn_max_rate;
2201 int i;
2202
2203 /* If the function is hidden - set min and max rates to zero */
2204 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2205 vn_min_rate = 0;
2206 vn_max_rate = 0;
2207
2208 } else {
2209 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2210 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
8a1c38d1 2211 /* If fairness is enabled (not all min rates are zero) and
34f80b04 2212 the current min rate is zero - set it to 1.
33471629 2213 This is a requirement of the algorithm. */
8a1c38d1 2214 if (bp->vn_weight_sum && (vn_min_rate == 0))
34f80b04
EG
2215 vn_min_rate = DEF_MIN_RATE;
2216 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2217 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2218 }
2219
8a1c38d1
EG
2220 DP(NETIF_MSG_IFUP,
2221 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2222 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
2223
2224 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2225 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2226
2227 /* global vn counter - maximal Mbps for this vn */
2228 m_rs_vn.vn_counter.rate = vn_max_rate;
2229
2230 /* quota - number of bytes transmitted in this period */
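	/* vn_max_rate is in Mbps (1 bit per usec), so rate * period / 8
	 yields bytes */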
2231 m_rs_vn.vn_counter.quota =
2232 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2233
8a1c38d1 2234 if (bp->vn_weight_sum) {
34f80b04
EG
2235 /* credit for each period of the fairness algorithm:
2236 number of bytes in T_FAIR (the VNs share the port rate).
8a1c38d1
EG
2237 vn_weight_sum should not be larger than 10000, thus
2238 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2239 than zero */
34f80b04 2240 m_fair_vn.vn_credit_delta =
8a1c38d1
EG
2241 max((u32)(vn_min_rate * (T_FAIR_COEF /
2242 (8 * bp->vn_weight_sum))),
2243 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
34f80b04
EG
2244 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2245 m_fair_vn.vn_credit_delta);
2246 }
2247
34f80b04
EG
2248 /* Store it to internal memory */
2249 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2250 REG_WR(bp, BAR_XSTRORM_INTMEM +
2251 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2252 ((u32 *)(&m_rs_vn))[i]);
2253
2254 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2255 REG_WR(bp, BAR_XSTRORM_INTMEM +
2256 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2257 ((u32 *)(&m_fair_vn))[i]);
2258}
2259
8a1c38d1 2260
c18487ee
YR
2261/* This function is called upon link interrupt */
2262static void bnx2x_link_attn(struct bnx2x *bp)
2263{
bb2a0f7a
YG
2264 /* Make sure that we are synced with the current statistics */
2265 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2266
c18487ee 2267 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2268
bb2a0f7a
YG
2269 if (bp->link_vars.link_up) {
2270
1c06328c
EG
2271 /* dropless flow control */
2272 if (CHIP_IS_E1H(bp)) {
2273 int port = BP_PORT(bp);
2274 u32 pause_enabled = 0;
2275
2276 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2277 pause_enabled = 1;
2278
2279 REG_WR(bp, BAR_USTRORM_INTMEM +
2280 USTORM_PAUSE_ENABLED_OFFSET(port),
2281 pause_enabled);
2282 }
2283
bb2a0f7a
YG
2284 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2285 struct host_port_stats *pstats;
2286
2287 pstats = bnx2x_sp(bp, port_stats);
2288 /* reset old bmac stats */
2289 memset(&(pstats->mac_stx[0]), 0,
2290 sizeof(struct mac_stx));
2291 }
2292 if ((bp->state == BNX2X_STATE_OPEN) ||
2293 (bp->state == BNX2X_STATE_DISABLED))
2294 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2295 }
2296
c18487ee
YR
2297 /* indicate link status */
2298 bnx2x_link_report(bp);
34f80b04
EG
2299
2300 if (IS_E1HMF(bp)) {
8a1c38d1 2301 int port = BP_PORT(bp);
34f80b04 2302 int func;
8a1c38d1 2303 int vn;
34f80b04
EG
2304
2305 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2306 if (vn == BP_E1HVN(bp))
2307 continue;
2308
8a1c38d1 2309 func = ((vn << 1) | port);
34f80b04
EG
2310
2311 /* Set the attention towards other drivers
2312 on the same port */
2313 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2314 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2315 }
34f80b04 2316
8a1c38d1
EG
2317 if (bp->link_vars.link_up) {
2318 int i;
2319
2320 /* Init rate shaping and fairness contexts */
2321 bnx2x_init_port_minmax(bp);
34f80b04 2322
34f80b04 2323 for (vn = VN_0; vn < E1HVN_MAX; vn++)
8a1c38d1
EG
2324 bnx2x_init_vn_minmax(bp, 2*vn + port);
2325
2326 /* Store it to internal memory */
2327 for (i = 0;
2328 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2329 REG_WR(bp, BAR_XSTRORM_INTMEM +
2330 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2331 ((u32 *)(&bp->cmng))[i]);
2332 }
34f80b04 2333 }
c18487ee 2334}
a2fbb9ea 2335
c18487ee
YR
2336static void bnx2x__link_status_update(struct bnx2x *bp)
2337{
2338 if (bp->state != BNX2X_STATE_OPEN)
2339 return;
a2fbb9ea 2340
c18487ee 2341 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2342
bb2a0f7a
YG
2343 if (bp->link_vars.link_up)
2344 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2345 else
2346 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2347
c18487ee
YR
2348 /* indicate link status */
2349 bnx2x_link_report(bp);
a2fbb9ea 2350}
a2fbb9ea 2351
34f80b04
EG
2352static void bnx2x_pmf_update(struct bnx2x *bp)
2353{
2354 int port = BP_PORT(bp);
2355 u32 val;
2356
2357 bp->port.pmf = 1;
2358 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2359
2360 /* enable nig attention */
2361 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2362 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2363 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2364
2365 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2366}
2367
c18487ee 2368/* end of Link */
a2fbb9ea
ET
2369
2370/* slow path */
2371
2372/*
2373 * General service functions
2374 */
2375
2376/* the slow path queue is odd since completions arrive on the fastpath ring */
2377static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2378 u32 data_hi, u32 data_lo, int common)
2379{
34f80b04 2380 int func = BP_FUNC(bp);
a2fbb9ea 2381
34f80b04
EG
2382 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2383 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
2384 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2385 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2386 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2387
2388#ifdef BNX2X_STOP_ON_ERROR
2389 if (unlikely(bp->panic))
2390 return -EIO;
2391#endif
2392
34f80b04 2393 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2394
2395 if (!bp->spq_left) {
2396 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2397 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2398 bnx2x_panic();
2399 return -EBUSY;
2400 }
f1410647 2401
a2fbb9ea
ET
2402 /* CID needs port number to be encoded in it */
2403 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2404 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2405 HW_CID(bp, cid)));
2406 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2407 if (common)
2408 bp->spq_prod_bd->hdr.type |=
2409 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2410
2411 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2412 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2413
2414 bp->spq_left--;
2415
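	/* advance the producer, wrapping back to the ring base
	 after the last BD */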
2416 if (bp->spq_prod_bd == bp->spq_last_bd) {
2417 bp->spq_prod_bd = bp->spq;
2418 bp->spq_prod_idx = 0;
2419 DP(NETIF_MSG_TIMER, "end of spq\n");
2420
2421 } else {
2422 bp->spq_prod_bd++;
2423 bp->spq_prod_idx++;
2424 }
2425
34f80b04 2426 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
2427 bp->spq_prod_idx);
2428
34f80b04 2429 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2430 return 0;
2431}
2432
2433/* acquire split MCP access lock register */
4a37fb66 2434static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2435{
a2fbb9ea 2436 u32 i, j, val;
34f80b04 2437 int rc = 0;
a2fbb9ea
ET
2438
2439 might_sleep();
2440 i = 100;
2441 for (j = 0; j < i*10; j++) {
2442 val = (1UL << 31);
2443 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2444 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2445 if (val & (1L << 31))
2446 break;
2447
2448 msleep(5);
2449 }
a2fbb9ea 2450 if (!(val & (1L << 31))) {
19680c48 2451 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2452 rc = -EBUSY;
2453 }
2454
2455 return rc;
2456}
2457
4a37fb66
YG
2458/* release split MCP access lock register */
2459static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea
ET
2460{
2461 u32 val = 0;
2462
2463 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2464}
2465
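/* compare the cached default status block indices against the chip's copy
 and return a bitmask of which of the five (attn/c/u/x/t) advanced */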
2466static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2467{
2468 struct host_def_status_block *def_sb = bp->def_status_blk;
2469 u16 rc = 0;
2470
2471 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2472 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2473 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2474 rc |= 1;
2475 }
2476 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2477 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2478 rc |= 2;
2479 }
2480 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2481 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2482 rc |= 4;
2483 }
2484 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2485 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2486 rc |= 8;
2487 }
2488 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2489 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2490 rc |= 16;
2491 }
2492 return rc;
2493}
2494
2495/*
2496 * slow path service functions
2497 */
2498
2499static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2500{
34f80b04 2501 int port = BP_PORT(bp);
5c862848
EG
2502 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2503 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
2504 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2505 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2506 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2507 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2508 u32 aeu_mask;
87942b46 2509 u32 nig_mask = 0;
a2fbb9ea 2510
a2fbb9ea
ET
2511 if (bp->attn_state & asserted)
2512 BNX2X_ERR("IGU ERROR\n");
2513
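	/* mask the newly asserted attention bits in the AEU until they
	 have been handled and deasserted */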
3fcaf2e5
EG
2514 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2515 aeu_mask = REG_RD(bp, aeu_addr);
2516
a2fbb9ea 2517 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5
EG
2518 aeu_mask, asserted);
2519 aeu_mask &= ~(asserted & 0xff);
2520 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2521
3fcaf2e5
EG
2522 REG_WR(bp, aeu_addr, aeu_mask);
2523 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2524
3fcaf2e5 2525 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2526 bp->attn_state |= asserted;
3fcaf2e5 2527 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2528
2529 if (asserted & ATTN_HARD_WIRED_MASK) {
2530 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2531
a5e9a7cf
EG
2532 bnx2x_acquire_phy_lock(bp);
2533
877e9aa4 2534 /* save nig interrupt mask */
87942b46 2535 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2536 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2537
c18487ee 2538 bnx2x_link_attn(bp);
a2fbb9ea
ET
2539
2540 /* handle unicore attn? */
2541 }
2542 if (asserted & ATTN_SW_TIMER_4_FUNC)
2543 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2544
2545 if (asserted & GPIO_2_FUNC)
2546 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2547
2548 if (asserted & GPIO_3_FUNC)
2549 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2550
2551 if (asserted & GPIO_4_FUNC)
2552 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2553
2554 if (port == 0) {
2555 if (asserted & ATTN_GENERAL_ATTN_1) {
2556 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2557 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2558 }
2559 if (asserted & ATTN_GENERAL_ATTN_2) {
2560 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2561 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2562 }
2563 if (asserted & ATTN_GENERAL_ATTN_3) {
2564 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2565 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2566 }
2567 } else {
2568 if (asserted & ATTN_GENERAL_ATTN_4) {
2569 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2570 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2571 }
2572 if (asserted & ATTN_GENERAL_ATTN_5) {
2573 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2574 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2575 }
2576 if (asserted & ATTN_GENERAL_ATTN_6) {
2577 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2578 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2579 }
2580 }
2581
2582 } /* if hardwired */
2583
5c862848
EG
2584 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2585 asserted, hc_addr);
2586 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
2587
2588 /* now set back the mask */
a5e9a7cf 2589 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2590 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2591 bnx2x_release_phy_lock(bp);
2592 }
a2fbb9ea
ET
2593}
2594
877e9aa4 2595static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2596{
34f80b04 2597 int port = BP_PORT(bp);
877e9aa4
ET
2598 int reg_offset;
2599 u32 val;
2600
34f80b04
EG
2601 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2602 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2603
34f80b04 2604 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2605
2606 val = REG_RD(bp, reg_offset);
2607 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2608 REG_WR(bp, reg_offset, val);
2609
2610 BNX2X_ERR("SPIO5 hw attention\n");
2611
35b19ba5
EG
2612 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2613 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
877e9aa4
ET
2614 /* Fan failure attention */
2615
17de50b7 2616 /* The PHY reset is controlled by GPIO 1 */
877e9aa4 2617 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
17de50b7
EG
2618 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2619 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2620 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2621 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4 2622 /* mark the failure */
c18487ee 2623 bp->link_params.ext_phy_config &=
877e9aa4 2624 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
c18487ee 2625 bp->link_params.ext_phy_config |=
877e9aa4
ET
2626 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2627 SHMEM_WR(bp,
2628 dev_info.port_hw_config[port].
2629 external_phy_config,
c18487ee 2630 bp->link_params.ext_phy_config);
877e9aa4
ET
2631 /* log the failure */
2632 printk(KERN_ERR PFX "Fan Failure on Network"
2633 " Controller %s has caused the driver to"
2634 " shutdown the card to prevent permanent"
2635 " damage. Please contact Dell Support for"
2636 " assistance\n", bp->dev->name);
2637 break;
2638
2639 default:
2640 break;
2641 }
2642 }
34f80b04 2643
589abe3a
EG
2644 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2645 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2646 bnx2x_acquire_phy_lock(bp);
2647 bnx2x_handle_module_detect_int(&bp->link_params);
2648 bnx2x_release_phy_lock(bp);
2649 }
2650
34f80b04
EG
2651 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2652
2653 val = REG_RD(bp, reg_offset);
2654 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2655 REG_WR(bp, reg_offset, val);
2656
2657 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2658 (attn & HW_INTERRUT_ASSERT_SET_0));
2659 bnx2x_panic();
2660 }
877e9aa4
ET
2661}
2662
2663static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2664{
2665 u32 val;
2666
0626b899 2667 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
2668
2669 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2670 BNX2X_ERR("DB hw attention 0x%x\n", val);
2671 /* DORQ discard attention */
2672 if (val & 0x2)
2673 BNX2X_ERR("FATAL error from DORQ\n");
2674 }
34f80b04
EG
2675
2676 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2677
2678 int port = BP_PORT(bp);
2679 int reg_offset;
2680
2681 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2682 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2683
2684 val = REG_RD(bp, reg_offset);
2685 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2686 REG_WR(bp, reg_offset, val);
2687
2688 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2689 (attn & HW_INTERRUT_ASSERT_SET_1));
2690 bnx2x_panic();
2691 }
877e9aa4
ET
2692}
2693
2694static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2695{
2696 u32 val;
2697
2698 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2699
2700 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2701 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2702 /* CFC error attention */
2703 if (val & 0x2)
2704 BNX2X_ERR("FATAL error from CFC\n");
2705 }
2706
2707 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2708
2709 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2710 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2711 /* RQ_USDMDP_FIFO_OVERFLOW */
2712 if (val & 0x18000)
2713 BNX2X_ERR("FATAL error from PXP\n");
2714 }
34f80b04
EG
2715
2716 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2717
2718 int port = BP_PORT(bp);
2719 int reg_offset;
2720
2721 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2722 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2723
2724 val = REG_RD(bp, reg_offset);
2725 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2726 REG_WR(bp, reg_offset, val);
2727
2728 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2729 (attn & HW_INTERRUT_ASSERT_SET_2));
2730 bnx2x_panic();
2731 }
877e9aa4
ET
2732}
2733
2734static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2735{
34f80b04
EG
2736 u32 val;
2737
877e9aa4
ET
2738 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2739
34f80b04
EG
2740 if (attn & BNX2X_PMF_LINK_ASSERT) {
2741 int func = BP_FUNC(bp);
2742
2743 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2744 bnx2x__link_status_update(bp);
2745 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2746 DRV_STATUS_PMF)
2747 bnx2x_pmf_update(bp);
2748
2749 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
2750
2751 BNX2X_ERR("MC assert!\n");
2752 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2753 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2754 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2755 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2756 bnx2x_panic();
2757
2758 } else if (attn & BNX2X_MCP_ASSERT) {
2759
2760 BNX2X_ERR("MCP assert!\n");
2761 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 2762 bnx2x_fw_dump(bp);
877e9aa4
ET
2763
2764 } else
2765 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2766 }
2767
2768 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
2769 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2770 if (attn & BNX2X_GRC_TIMEOUT) {
2771 val = CHIP_IS_E1H(bp) ?
2772 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2773 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2774 }
2775 if (attn & BNX2X_GRC_RSV) {
2776 val = CHIP_IS_E1H(bp) ?
2777 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2778 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2779 }
877e9aa4 2780 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
2781 }
2782}
2783
2784static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2785{
a2fbb9ea
ET
2786 struct attn_route attn;
2787 struct attn_route group_mask;
34f80b04 2788 int port = BP_PORT(bp);
877e9aa4 2789 int index;
a2fbb9ea
ET
2790 u32 reg_addr;
2791 u32 val;
3fcaf2e5 2792 u32 aeu_mask;
a2fbb9ea
ET
2793
2794 /* need to take HW lock because the MCP or the other port might also
2795 try to handle this event */
4a37fb66 2796 bnx2x_acquire_alr(bp);
a2fbb9ea
ET
2797
2798 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2799 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2800 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2801 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
2802 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2803 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
2804
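	/* each deasserted bit selects an attention group; hand each
	 handler only the signals routed to that group */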
2805 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2806 if (deasserted & (1 << index)) {
2807 group_mask = bp->attn_group[index];
2808
34f80b04
EG
2809 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2810 index, group_mask.sig[0], group_mask.sig[1],
2811 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 2812
877e9aa4
ET
2813 bnx2x_attn_int_deasserted3(bp,
2814 attn.sig[3] & group_mask.sig[3]);
2815 bnx2x_attn_int_deasserted1(bp,
2816 attn.sig[1] & group_mask.sig[1]);
2817 bnx2x_attn_int_deasserted2(bp,
2818 attn.sig[2] & group_mask.sig[2]);
2819 bnx2x_attn_int_deasserted0(bp,
2820 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 2821
a2fbb9ea
ET
2822 if ((attn.sig[0] & group_mask.sig[0] &
2823 HW_PRTY_ASSERT_SET_0) ||
2824 (attn.sig[1] & group_mask.sig[1] &
2825 HW_PRTY_ASSERT_SET_1) ||
2826 (attn.sig[2] & group_mask.sig[2] &
2827 HW_PRTY_ASSERT_SET_2))
6378c025 2828 BNX2X_ERR("FATAL HW block parity attention\n");
a2fbb9ea
ET
2829 }
2830 }
2831
4a37fb66 2832 bnx2x_release_alr(bp);
a2fbb9ea 2833
5c862848 2834 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
a2fbb9ea
ET
2835
2836 val = ~deasserted;
3fcaf2e5
EG
2837 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2838 val, reg_addr);
5c862848 2839 REG_WR(bp, reg_addr, val);
a2fbb9ea 2840
a2fbb9ea 2841 if (~bp->attn_state & deasserted)
3fcaf2e5 2842 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
2843
2844 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2845 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2846
3fcaf2e5
EG
2847 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2848 aeu_mask = REG_RD(bp, reg_addr);
2849
2850 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2851 aeu_mask, deasserted);
2852 aeu_mask |= (deasserted & 0xff);
2853 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2854
3fcaf2e5
EG
2855 REG_WR(bp, reg_addr, aeu_mask);
2856 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
2857
2858 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2859 bp->attn_state &= ~deasserted;
2860 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2861}
2862
2863static void bnx2x_attn_int(struct bnx2x *bp)
2864{
2865 /* read local copy of bits */
68d59484
EG
2866 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2867 attn_bits);
2868 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2869 attn_bits_ack);
a2fbb9ea
ET
2870 u32 attn_state = bp->attn_state;
2871
2872 /* look for changed bits */
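	/* asserted: raised and not yet acked; deasserted: acked and
	 dropped while still tracked in attn_state */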
2873 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2874 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2875
2876 DP(NETIF_MSG_HW,
2877 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2878 attn_bits, attn_ack, asserted, deasserted);
2879
2880 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 2881 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
2882
2883 /* handle bits that were raised */
2884 if (asserted)
2885 bnx2x_attn_int_asserted(bp, asserted);
2886
2887 if (deasserted)
2888 bnx2x_attn_int_deasserted(bp, deasserted);
2889}
2890
2891static void bnx2x_sp_task(struct work_struct *work)
2892{
1cf167f2 2893 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
2894 u16 status;
2895
34f80b04 2896
a2fbb9ea
ET
2897 /* Return here if interrupt is disabled */
2898 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2899 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2900 return;
2901 }
2902
2903 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
2904/* if (status == 0) */
2905/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 2906
3196a88a 2907 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 2908
877e9aa4
ET
2909 /* HW attentions */
2910 if (status & 0x1)
a2fbb9ea 2911 bnx2x_attn_int(bp);
a2fbb9ea 2912
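	/* ack every storm index that was consumed; only the final ack
	 re-enables the IGU interrupt */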
68d59484 2913 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
a2fbb9ea
ET
2914 IGU_INT_NOP, 1);
2915 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2916 IGU_INT_NOP, 1);
2917 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2918 IGU_INT_NOP, 1);
2919 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2920 IGU_INT_NOP, 1);
2921 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2922 IGU_INT_ENABLE, 1);
877e9aa4 2923
a2fbb9ea
ET
2924}
2925
2926static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2927{
2928 struct net_device *dev = dev_instance;
2929 struct bnx2x *bp = netdev_priv(dev);
2930
2931 /* Return here if interrupt is disabled */
2932 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2933 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2934 return IRQ_HANDLED;
2935 }
2936
8d9c5f34 2937 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
2938
2939#ifdef BNX2X_STOP_ON_ERROR
2940 if (unlikely(bp->panic))
2941 return IRQ_HANDLED;
2942#endif
2943
1cf167f2 2944 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
2945
2946 return IRQ_HANDLED;
2947}
2948
2949/* end of slow path */
2950
2951/* Statistics */
2952
2953/****************************************************************************
2954* Macros
2955****************************************************************************/
2956
a2fbb9ea
ET
2957/* sum[hi:lo] += add[hi:lo] */
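/* the (s_lo < a_lo) test detects 32-bit wrap-around of the low dword
 and carries 1 into the high dword */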
2958#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2959 do { \
2960 s_lo += a_lo; \
f5ba6772 2961 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
a2fbb9ea
ET
2962 } while (0)
2963
2964/* difference = minuend - subtrahend */
2965#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2966 do { \
bb2a0f7a
YG
2967 if (m_lo < s_lo) { \
2968 /* underflow */ \
a2fbb9ea 2969 d_hi = m_hi - s_hi; \
bb2a0f7a 2970 if (d_hi > 0) { \
6378c025 2971 /* we can 'loan' 1 */ \
a2fbb9ea
ET
2972 d_hi--; \
2973 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 2974 } else { \
6378c025 2975 /* m_hi <= s_hi */ \
a2fbb9ea
ET
2976 d_hi = 0; \
2977 d_lo = 0; \
2978 } \
bb2a0f7a
YG
2979 } else { \
2980 /* m_lo >= s_lo */ \
a2fbb9ea 2981 if (m_hi < s_hi) { \
bb2a0f7a
YG
2982 d_hi = 0; \
2983 d_lo = 0; \
2984 } else { \
6378c025 2985 /* m_hi >= s_hi */ \
bb2a0f7a
YG
2986 d_hi = m_hi - s_hi; \
2987 d_lo = m_lo - s_lo; \
a2fbb9ea
ET
2988 } \
2989 } \
2990 } while (0)
2991
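/* mac_stx[0] holds the previous raw MAC snapshot, mac_stx[1] the
 running totals */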
bb2a0f7a 2992#define UPDATE_STAT64(s, t) \
a2fbb9ea 2993 do { \
bb2a0f7a
YG
2994 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2995 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2996 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2997 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2998 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2999 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea
ET
3000 } while (0)
3001
bb2a0f7a 3002#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 3003 do { \
bb2a0f7a
YG
3004 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3005 diff.lo, new->s##_lo, old->s##_lo); \
3006 ADD_64(estats->t##_hi, diff.hi, \
3007 estats->t##_lo, diff.lo); \
a2fbb9ea
ET
3008 } while (0)
3009
3010/* sum[hi:lo] += add */
3011#define ADD_EXTEND_64(s_hi, s_lo, a) \
3012 do { \
3013 s_lo += a; \
3014 s_hi += (s_lo < a) ? 1 : 0; \
3015 } while (0)
3016
bb2a0f7a 3017#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 3018 do { \
bb2a0f7a
YG
3019 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3020 pstats->mac_stx[1].s##_lo, \
3021 new->s); \
a2fbb9ea
ET
3022 } while (0)
3023
bb2a0f7a 3024#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea 3025 do { \
4781bfad
EG
3026 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3027 old_tclient->s = tclient->s; \
de832a55
EG
3028 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3029 } while (0)
3030
3031#define UPDATE_EXTEND_USTAT(s, t) \
3032 do { \
3033 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3034 old_uclient->s = uclient->s; \
3035 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
bb2a0f7a
YG
3036 } while (0)
3037
3038#define UPDATE_EXTEND_XSTAT(s, t) \
3039 do { \
4781bfad
EG
3040 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3041 old_xclient->s = xclient->s; \
de832a55
EG
3042 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3043 } while (0)
3044
3045/* minuend -= subtrahend */
3046#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3047 do { \
3048 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3049 } while (0)
3050
3051/* minuend[hi:lo] -= subtrahend */
3052#define SUB_EXTEND_64(m_hi, m_lo, s) \
3053 do { \
3054 SUB_64(m_hi, 0, m_lo, s); \
3055 } while (0)
3056
3057#define SUB_EXTEND_USTAT(s, t) \
3058 do { \
3059 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3060 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
a2fbb9ea
ET
3061 } while (0)
3062
3063/*
3064 * General service functions
3065 */
3066
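/* stats are kept as {hi, lo} u32 pairs; on 32-bit longs only the
 low dword can be returned */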
3067static inline long bnx2x_hilo(u32 *hiref)
3068{
3069 u32 lo = *(hiref + 1);
3070#if (BITS_PER_LONG == 64)
3071 u32 hi = *hiref;
3072
3073 return HILO_U64(hi, lo);
3074#else
3075 return lo;
3076#endif
3077}
3078
3079/*
3080 * Init service functions
3081 */
3082
bb2a0f7a
YG
3083static void bnx2x_storm_stats_post(struct bnx2x *bp)
3084{
3085 if (!bp->stats_pending) {
3086 struct eth_query_ramrod_data ramrod_data = {0};
de832a55 3087 int i, rc;
bb2a0f7a
YG
3088
3089 ramrod_data.drv_counter = bp->stats_counter++;
8d9c5f34 3090 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
de832a55
EG
3091 for_each_queue(bp, i)
3092 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
bb2a0f7a
YG
3093
3094 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3095 ((u32 *)&ramrod_data)[1],
3096 ((u32 *)&ramrod_data)[0], 0);
3097 if (rc == 0) {
3098 /* stats ramrod has its own slot on the spq */
3099 bp->spq_left++;
3100 bp->stats_pending = 1;
3101 }
3102 }
3103}
3104
3105static void bnx2x_stats_init(struct bnx2x *bp)
3106{
3107 int port = BP_PORT(bp);
de832a55 3108 int i;
bb2a0f7a 3109
de832a55 3110 bp->stats_pending = 0;
bb2a0f7a
YG
3111 bp->executer_idx = 0;
3112 bp->stats_counter = 0;
3113
3114 /* port stats */
3115 if (!BP_NOMCP(bp))
3116 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3117 else
3118 bp->port.port_stx = 0;
3119 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3120
3121 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3122 bp->port.old_nig_stats.brb_discard =
3123 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
66e855f3
YG
3124 bp->port.old_nig_stats.brb_truncate =
3125 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
bb2a0f7a
YG
3126 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3127 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3128 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3129 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3130
3131 /* function stats */
de832a55
EG
3132 for_each_queue(bp, i) {
3133 struct bnx2x_fastpath *fp = &bp->fp[i];
3134
3135 memset(&fp->old_tclient, 0,
3136 sizeof(struct tstorm_per_client_stats));
3137 memset(&fp->old_uclient, 0,
3138 sizeof(struct ustorm_per_client_stats));
3139 memset(&fp->old_xclient, 0,
3140 sizeof(struct xstorm_per_client_stats));
3141 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3142 }
3143
bb2a0f7a 3144 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
bb2a0f7a
YG
3145 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3146
3147 bp->stats_state = STATS_STATE_DISABLED;
3148 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3149 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3150}
3151
3152static void bnx2x_hw_stats_post(struct bnx2x *bp)
3153{
3154 struct dmae_command *dmae = &bp->stats_dmae;
3155 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3156
3157 *stats_comp = DMAE_COMP_VAL;
de832a55
EG
3158 if (CHIP_REV_IS_SLOW(bp))
3159 return;
bb2a0f7a
YG
3160
3161 /* loader */
3162 if (bp->executer_idx) {
3163 int loader_idx = PMF_DMAE_C(bp);
3164
3165 memset(dmae, 0, sizeof(struct dmae_command));
3166
3167 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3168 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3169 DMAE_CMD_DST_RESET |
3170#ifdef __BIG_ENDIAN
3171 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3172#else
3173 DMAE_CMD_ENDIANITY_DW_SWAP |
3174#endif
3175 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3176 DMAE_CMD_PORT_0) |
3177 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3178 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3179 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3180 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3181 sizeof(struct dmae_command) *
3182 (loader_idx + 1)) >> 2;
3183 dmae->dst_addr_hi = 0;
3184 dmae->len = sizeof(struct dmae_command) >> 2;
3185 if (CHIP_IS_E1(bp))
3186 dmae->len--;
3187 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3188 dmae->comp_addr_hi = 0;
3189 dmae->comp_val = 1;
3190
3191 *stats_comp = 0;
3192 bnx2x_post_dmae(bp, dmae, loader_idx);
3193
3194 } else if (bp->func_stx) {
3195 *stats_comp = 0;
3196 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3197 }
3198}
3199
3200static int bnx2x_stats_comp(struct bnx2x *bp)
3201{
3202 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3203 int cnt = 10;
3204
3205 might_sleep();
3206 while (*stats_comp != DMAE_COMP_VAL) {
bb2a0f7a
YG
3207 if (!cnt) {
3208 BNX2X_ERR("timeout waiting for stats finished\n");
3209 break;
3210 }
3211 cnt--;
12469401 3212 msleep(1);
bb2a0f7a
YG
3213 }
3214 return 1;
3215}
3216
3217/*
3218 * Statistics service functions
3219 */
3220
3221static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3222{
3223 struct dmae_command *dmae;
3224 u32 opcode;
3225 int loader_idx = PMF_DMAE_C(bp);
3226 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3227
3228 /* sanity */
3229 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3230 BNX2X_ERR("BUG!\n");
3231 return;
3232 }
3233
3234 bp->executer_idx = 0;
3235
3236 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3237 DMAE_CMD_C_ENABLE |
3238 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3239#ifdef __BIG_ENDIAN
3240 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3241#else
3242 DMAE_CMD_ENDIANITY_DW_SWAP |
3243#endif
3244 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3245 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3246
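	/* the port stats block is larger than one DMAE read, so copy it
	 with two commands: the first completes to a GRC GO register,
	 the second to the PCI completion word */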
3247 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3248 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3249 dmae->src_addr_lo = bp->port.port_stx >> 2;
3250 dmae->src_addr_hi = 0;
3251 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3252 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3253 dmae->len = DMAE_LEN32_RD_MAX;
3254 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3255 dmae->comp_addr_hi = 0;
3256 dmae->comp_val = 1;
3257
3258 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3259 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3260 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3261 dmae->src_addr_hi = 0;
7a9b2557
VZ
3262 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3263 DMAE_LEN32_RD_MAX * 4);
3264 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3265 DMAE_LEN32_RD_MAX * 4);
bb2a0f7a
YG
3266 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3267 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3268 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3269 dmae->comp_val = DMAE_COMP_VAL;
3270
3271 *stats_comp = 0;
3272 bnx2x_hw_stats_post(bp);
3273 bnx2x_stats_comp(bp);
3274}
3275
3276static void bnx2x_port_stats_init(struct bnx2x *bp)
a2fbb9ea
ET
3277{
3278 struct dmae_command *dmae;
34f80b04 3279 int port = BP_PORT(bp);
bb2a0f7a 3280 int vn = BP_E1HVN(bp);
a2fbb9ea 3281 u32 opcode;
bb2a0f7a 3282 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3283 u32 mac_addr;
bb2a0f7a
YG
3284 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3285
3286 /* sanity */
3287 if (!bp->link_vars.link_up || !bp->port.pmf) {
3288 BNX2X_ERR("BUG!\n");
3289 return;
3290 }
a2fbb9ea
ET
3291
3292 bp->executer_idx = 0;
bb2a0f7a
YG
3293
3294 /* MCP */
3295 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3296 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3297 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3298#ifdef __BIG_ENDIAN
bb2a0f7a 3299 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3300#else
bb2a0f7a 3301 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3302#endif
bb2a0f7a
YG
3303 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3304 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3305
bb2a0f7a 3306 if (bp->port.port_stx) {
a2fbb9ea
ET
3307
3308 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3309 dmae->opcode = opcode;
bb2a0f7a
YG
3310 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3311 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3312 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3313 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3314 dmae->len = sizeof(struct host_port_stats) >> 2;
3315 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3316 dmae->comp_addr_hi = 0;
3317 dmae->comp_val = 1;
a2fbb9ea
ET
3318 }
3319
bb2a0f7a
YG
3320 if (bp->func_stx) {
3321
3322 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3323 dmae->opcode = opcode;
3324 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3325 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3326 dmae->dst_addr_lo = bp->func_stx >> 2;
3327 dmae->dst_addr_hi = 0;
3328 dmae->len = sizeof(struct host_func_stats) >> 2;
3329 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3330 dmae->comp_addr_hi = 0;
3331 dmae->comp_val = 1;
a2fbb9ea
ET
3332 }
3333
bb2a0f7a 3334 /* MAC */
a2fbb9ea
ET
3335 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3336 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3337 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3338#ifdef __BIG_ENDIAN
3339 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3340#else
3341 DMAE_CMD_ENDIANITY_DW_SWAP |
3342#endif
bb2a0f7a
YG
3343 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3344 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3345
c18487ee 3346 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
a2fbb9ea
ET
3347
3348 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3349 NIG_REG_INGRESS_BMAC0_MEM);
3350
		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

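/* Prepare the single DMAE command that copies this function's
 * host_func_stats block from host memory into the per-function
 * statistics area in device memory (bp->func_stx).  Completion is
 * signalled by the DMAE engine writing DMAE_COMP_VAL to stats_comp,
 * which the stats code presumably polls via bnx2x_stats_comp().
 */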
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

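/* Start a statistics cycle: the port management function (PMF) owns
 * the port-wide MAC/NIG DMAE chain, while a non-PMF function with a
 * valid func_stx refreshes only its per-function block.  The actual
 * transfers are then kicked off by bnx2x_hw_stats_post() (DMAE) and
 * bnx2x_storm_stats_post() (statistics query to the storm firmware),
 * both defined earlier in this file.
 */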
static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

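/* Fold the BMAC hardware counters into the 64-bit host accumulators.
 * UPDATE_STAT64() (defined elsewhere in the driver) extends each
 * hi/lo counter pair fetched by DMAE into the matching mac_stx field,
 * using the local 'diff' scratch struct to handle counter wrap.
 */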
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}

static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;
	u32 nig_timer_max;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by dmae but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
	if (nig_timer_max != estats->nig_timer_max) {
		estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}

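/* Collect the per-client statistics that the storm processors DMA
 * into the fw_stats buffer.  Each storm stamps its block with a
 * counter; a block is treated as valid only once that stamp has
 * caught up with bp->stats_counter, otherwise the whole update is
 * retried on a later cycle (see the stats_pending handling in
 * bnx2x_stats_update() below).
 */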
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	memset(&(fstats->total_bytes_received_hi), 0,
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;

		/* are storm stats valid? */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   "  xstorm counter (%d) != stats_counter (%d)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   "  tstorm counter (%d) != stats_counter (%d)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   "  ustorm counter (%d) != stats_counter (%d)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		qstats->total_bytes_received_hi =
		qstats->valid_bytes_received_hi =
				le32_to_cpu(tclient->total_rcv_bytes.hi);
		qstats->total_bytes_received_lo =
		qstats->valid_bytes_received_lo =
				le32_to_cpu(tclient->total_rcv_bytes.lo);

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->total_sent_bytes.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->total_sent_bytes.lo);

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}

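/* Translate the accumulated 64-bit driver statistics into the
 * struct net_device_stats counters reported to the network stack.
 * bnx2x_hilo() (defined earlier) folds a {hi, lo} pair into a single
 * unsigned long, so these values may truncate on 32-bit hosts.
 */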
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = estats->mac_discard;
	for_each_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	estats->driver_xoff = 0;
	estats->rx_err_discard_pkt = 0;
	estats->rx_skb_alloc_failed = 0;
	estats->hw_csum_err = 0;
	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;

		estats->driver_xoff += qstats->driver_xoff;
		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
		estats->hw_csum_err += qstats->hw_csum_err;
	}
}

static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
				  "  tx pkt (%lx)\n",
		       bnx2x_tx_avail(bp->fp),
		       le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
				  "  rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
			     bp->fp->rx_comp_cons),
		       le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
				  "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u  "
			"packets_too_big_discard %lu  no_buff_discard %lu  "
			"mac_discard %u  mac_filter_discard %u  "
			"xxovrflow_discard %u  brb_truncate_discard %u  "
			"ttl0_discard %u\n",
		       le32_to_cpu(old_tclient->checksum_discard),
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard,
		       estats->brb_truncate_discard,
		       le32_to_cpu(old_tclient->ttl0_discard));

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

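/* Statistics state machine: indexed by [current state][event], each
 * entry names the action to run and the next state to enter.  Events
 * come from PMF migration, link-up, the periodic timer (UPDATE) and
 * unload (STOP); bnx2x_stats_handle() below does the dispatch.
 */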
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

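/* Periodic driver timer: feeds the driver-pulse heartbeat to the
 * management CPU (MCP) and, while the device is up, drives the
 * statistics state machine with an UPDATE event.  It rearms itself
 * with bp->current_interval, which is presumably set up at init time.
 */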
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp, 1000);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if ((bp->state == BNX2X_STATE_OPEN) ||
	    (bp->state == BNX2X_STATE_DISABLED))
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct ustorm_status_block)/4);
	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct cstorm_status_block)/4);
}

static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
		USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
			USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct ustorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
}

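/* Set up the default status block.  Unlike the per-queue status
 * blocks it also carries the attention bits: the AEU attention group
 * signals are cached in bp->attn_group[] and the block address is
 * programmed into the HC attention message registers in addition to
 * each storm's internal memory.
 */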
static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						      reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
		USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

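/* Program the host coalescing timeouts for the Rx and Tx completion
 * queue indices of every queue.  The HC timeout field appears to be
 * in units of 12 usec, hence the division of the tick values (which
 * are kept in usec); a zero tick value disables coalescing for that
 * index, which is what the HC_DISABLE write encodes.
 */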
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     U_SB_ETH_RX_CQ_INDEX),
			 bp->rx_ticks ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     C_SB_ETH_TX_CQ_INDEX),
			 bp->tx_ticks ? 0 : 1);
	}
}

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}

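/* Initialize the Rx rings.  Each ring is a chain of BCM_PAGE_SIZE
 * pages whose last element(s) do not hold buffers but point to the
 * next page - hence the "- 2" for the BD and SGE rings, which
 * reserve two ring slots per link, and the "- 1" for the CQ ring.
 */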
static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod, cqe_ring_prod;
	int i, j;

	bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
	DP(NETIF_MSG_IFUP,
	   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);

	if (bp->flags & TPA_ENABLE_FLAG) {

		for_each_rx_queue(bp, j) {
			struct bnx2x_fastpath *fp = &bp->fp[j];

			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				pci_unmap_addr_set(&fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs on queue[%d]\n", i, j);
				fp->eth_q_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
				       cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}

static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_tx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_bd *tx_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1];

			tx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}

static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;
		u8 sb_id = fp->sb_id;

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
				 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
					 (u32)0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);
		}

		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.db_data_addr_hi =
						U64_HI(fp->tx_prods_mapping);
		context->xstorm_st_context.db_data_addr_lo =
						U64_LO(fp->tx_prods_mapping);
		context->xstorm_st_context.statistics_data = (cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = sb_id;

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}
}

static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_rx_queues));
}

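/* Per-client (per-queue) TSTORM configuration.  For TPA the maximum
 * number of SGEs per packet is derived from the MTU: the MTU is
 * rounded up to whole SGE pages, and the page count is then rounded
 * up to a multiple of PAGES_PER_SGE before being converted into a
 * number of SGE ring entries.
 */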
static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	if (bp->flags & TPA_ENABLE_FLAG) {
		tstorm_client.max_sges_for_packet =
			SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
		tstorm_client.max_sges_for_packet =
			((tstorm_client.max_sges_for_packet +
			  PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
			PAGES_PER_SGE_SHIFT;

		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
	}

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}

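/* Program the TSTORM MAC filtering configuration for this function.
 * Each ucast/mcast/bcast drop-all/accept-all field is a bit vector
 * of client IDs, and the requested mode is applied to the bit of
 * this function's leading client (1 << BP_L_ID(bp)).
 */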
static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = (1 << BP_L_ID(bp));
	int func = BP_FUNC(bp);
	int i;

	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	if (bp->flags & TPA_ENABLE_FLAG) {
		struct tstorm_eth_tpa_exist tpa = {0};

		tpa.tpa_exist = 1;

		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
		       ((u32 *)&tpa)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
		       ((u32 *)&tpa)[1]);
	}

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int func = 2*vn + port;
		u32 vn_cfg =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero)
		bp->vn_weight_sum = 0;
}

471de716 4914static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 4915{
a2fbb9ea
ET
4916 struct tstorm_eth_function_common_config tstorm_config = {0};
4917 struct stats_indication_flags stats_flags = {0};
34f80b04
EG
4918 int port = BP_PORT(bp);
4919 int func = BP_FUNC(bp);
de832a55
EG
4920 int i, j;
4921 u32 offset;
471de716 4922 u16 max_agg_size;
a2fbb9ea
ET
4923
4924 if (is_multi(bp)) {
555f6c78 4925 tstorm_config.config_flags = MULTI_FLAGS(bp);
a2fbb9ea
ET
4926 tstorm_config.rss_result_mask = MULTI_MASK;
4927 }
8d9c5f34
EG
4928 if (IS_E1HMF(bp))
4929 tstorm_config.config_flags |=
4930 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
a2fbb9ea 4931
34f80b04
EG
4932 tstorm_config.leading_client_id = BP_L_ID(bp);
4933
a2fbb9ea 4934 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4935 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
a2fbb9ea
ET
4936 (*(u32 *)&tstorm_config));
4937
c14423fe 4938 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
a2fbb9ea
ET
4939 bnx2x_set_storm_rx_mode(bp);
4940
de832a55
EG
4941 for_each_queue(bp, i) {
4942 u8 cl_id = bp->fp[i].cl_id;
4943
4944 /* reset xstorm per client statistics */
4945 offset = BAR_XSTRORM_INTMEM +
4946 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4947 for (j = 0;
4948 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4949 REG_WR(bp, offset + j*4, 0);
4950
4951 /* reset tstorm per client statistics */
4952 offset = BAR_TSTRORM_INTMEM +
4953 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4954 for (j = 0;
4955 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4956 REG_WR(bp, offset + j*4, 0);
4957
4958 /* reset ustorm per client statistics */
4959 offset = BAR_USTRORM_INTMEM +
4960 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4961 for (j = 0;
4962 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4963 REG_WR(bp, offset + j*4, 0);
4964 }
4965
4966 /* Init statistics related context */
34f80b04 4967 stats_flags.collect_eth = 1;
a2fbb9ea 4968
66e855f3 4969 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4970 ((u32 *)&stats_flags)[0]);
66e855f3 4971 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4972 ((u32 *)&stats_flags)[1]);
4973
66e855f3 4974 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4975 ((u32 *)&stats_flags)[0]);
66e855f3 4976 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4977 ((u32 *)&stats_flags)[1]);
4978
4979 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4980 ((u32 *)&stats_flags)[0]);
4981 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4982 ((u32 *)&stats_flags)[1]);
4983
66e855f3 4984 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4985 ((u32 *)&stats_flags)[0]);
66e855f3 4986 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4987 ((u32 *)&stats_flags)[1]);
4988
4989 REG_WR(bp, BAR_XSTRORM_INTMEM +
4990 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4991 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4992 REG_WR(bp, BAR_XSTRORM_INTMEM +
4993 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4994 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4995
4996 REG_WR(bp, BAR_TSTRORM_INTMEM +
4997 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4998 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4999 REG_WR(bp, BAR_TSTRORM_INTMEM +
5000 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5001 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04 5002
5003 REG_WR(bp, BAR_USTRORM_INTMEM +
5004 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5005 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5006 REG_WR(bp, BAR_USTRORM_INTMEM +
5007 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5008 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5009
5010 if (CHIP_IS_E1H(bp)) {
5011 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5012 IS_E1HMF(bp));
5013 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5014 IS_E1HMF(bp));
5015 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5016 IS_E1HMF(bp));
5017 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5018 IS_E1HMF(bp));
5019
5020 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5021 bp->e1hov);
5022 }
5023
5024 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5025 max_agg_size =
5026 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5027 SGE_PAGE_SIZE * PAGES_PER_SGE),
5028 (u32)0xffff);
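/* Illustrative numbers (assumed, not from the original source): with
 * SGE_PAGE_SIZE * PAGES_PER_SGE = 8192 the product
 * min(8, MAX_SKB_FRAGS) * 8192 = 65536 overflows a u16, so the outer
 * min() clamps it to 0xffff before the REG_WR16() below.
 */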
555f6c78 5029 for_each_rx_queue(bp, i) {
7a9b2557 5030 struct bnx2x_fastpath *fp = &bp->fp[i];
5031
5032 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5033 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5034 U64_LO(fp->rx_comp_mapping));
5035 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5036 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5037 U64_HI(fp->rx_comp_mapping));
5038
7a9b2557 5039 REG_WR16(bp, BAR_USTRORM_INTMEM +
0626b899 5040 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5041 max_agg_size);
5042 }
8a1c38d1 5043
5044 /* dropless flow control */
5045 if (CHIP_IS_E1H(bp)) {
5046 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5047
5048 rx_pause.bd_thr_low = 250;
5049 rx_pause.cqe_thr_low = 250;
5050 rx_pause.cos = 1;
5051 rx_pause.sge_thr_low = 0;
5052 rx_pause.bd_thr_high = 350;
5053 rx_pause.cqe_thr_high = 350;
5054 rx_pause.sge_thr_high = 0;
5055
5056 for_each_rx_queue(bp, i) {
5057 struct bnx2x_fastpath *fp = &bp->fp[i];
5058
5059 if (!fp->disable_tpa) {
5060 rx_pause.sge_thr_low = 150;
5061 rx_pause.sge_thr_high = 250;
5062 }
5063
5064
5065 offset = BAR_USTRORM_INTMEM +
5066 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5067 fp->cl_id);
5068 for (j = 0;
5069 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5070 j++)
5071 REG_WR(bp, offset + j*4,
5072 ((u32 *)&rx_pause)[j]);
5073 }
5074 }
5075
5076 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5077
5078 /* Init rate shaping and fairness contexts */
5079 if (IS_E1HMF(bp)) {
5080 int vn;
5081
5082 /* During init there is no active link
5083 Until link is up, set link rate to 10Gbps */
5084 bp->link_vars.line_speed = SPEED_10000;
5085 bnx2x_init_port_minmax(bp);
5086
5087 bnx2x_calc_vn_weight_sum(bp);
5088
5089 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5090 bnx2x_init_vn_minmax(bp, 2*vn + port);
5091
5092 /* Enable rate shaping and fairness */
5093 bp->cmng.flags.cmng_enables =
5094 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5095 if (bp->vn_weight_sum)
5096 bp->cmng.flags.cmng_enables |=
5097 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5098 else
5099 DP(NETIF_MSG_IFUP, "All MIN values are zeroes;"
5100 " fairness will be disabled\n");
5101 } else {
5102 /* rate shaping and fairness are disabled */
5103 DP(NETIF_MSG_IFUP,
5104 "single function mode minmax will be disabled\n");
5105 }
5106
5107
5108 /* Store it to internal memory */
5109 if (bp->port.pmf)
5110 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5111 REG_WR(bp, BAR_XSTRORM_INTMEM +
5112 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5113 ((u32 *)(&bp->cmng))[i]);
5114}
5115
5116static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5117{
5118 switch (load_code) {
5119 case FW_MSG_CODE_DRV_LOAD_COMMON:
5120 bnx2x_init_internal_common(bp);
5121 /* no break */
5122
5123 case FW_MSG_CODE_DRV_LOAD_PORT:
5124 bnx2x_init_internal_port(bp);
5125 /* no break */
5126
5127 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5128 bnx2x_init_internal_func(bp);
5129 break;
5130
5131 default:
5132 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5133 break;
5134 }
5135}
5136
5137static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5138{
5139 int i;
5140
5141 for_each_queue(bp, i) {
5142 struct bnx2x_fastpath *fp = &bp->fp[i];
5143
34f80b04 5144 fp->bp = bp;
a2fbb9ea 5145 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 5146 fp->index = i;
5147 fp->cl_id = BP_L_ID(bp) + i;
5148 fp->sb_id = fp->cl_id;
5149 DP(NETIF_MSG_IFUP,
5150 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5151 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5c862848 5152 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
0626b899 5153 fp->sb_id);
5c862848 5154 bnx2x_update_fpsb_idx(fp);
5155 }
5156
5157 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5158 DEF_SB_ID);
5159 bnx2x_update_dsb_idx(bp);
5160 bnx2x_update_coalesce(bp);
5161 bnx2x_init_rx_rings(bp);
5162 bnx2x_init_tx_ring(bp);
5163 bnx2x_init_sp_ring(bp);
5164 bnx2x_init_context(bp);
471de716 5165 bnx2x_init_internal(bp, load_code);
a2fbb9ea 5166 bnx2x_init_ind_table(bp);
5167 bnx2x_stats_init(bp);
5168
5169 /* At this point, we are ready for interrupts */
5170 atomic_set(&bp->intr_sem, 0);
5171
5172 /* flush all before enabling interrupts */
5173 mb();
5174 mmiowb();
5175
615f8fd9 5176 bnx2x_int_enable(bp);
5177}
5178
5179/* end of nic init */
5180
5181/*
5182 * gzip service functions
5183 */
5184
5185static int bnx2x_gunzip_init(struct bnx2x *bp)
5186{
5187 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5188 &bp->gunzip_mapping);
5189 if (bp->gunzip_buf == NULL)
5190 goto gunzip_nomem1;
5191
5192 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5193 if (bp->strm == NULL)
5194 goto gunzip_nomem2;
5195
5196 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5197 GFP_KERNEL);
5198 if (bp->strm->workspace == NULL)
5199 goto gunzip_nomem3;
5200
5201 return 0;
5202
5203gunzip_nomem3:
5204 kfree(bp->strm);
5205 bp->strm = NULL;
5206
5207gunzip_nomem2:
5208 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5209 bp->gunzip_mapping);
5210 bp->gunzip_buf = NULL;
5211
5212gunzip_nomem1:
5213 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5214 " decompression\n", bp->dev->name);
5215 return -ENOMEM;
5216}
5217
5218static void bnx2x_gunzip_end(struct bnx2x *bp)
5219{
5220 kfree(bp->strm->workspace);
5221
5222 kfree(bp->strm);
5223 bp->strm = NULL;
5224
5225 if (bp->gunzip_buf) {
5226 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5227 bp->gunzip_mapping);
5228 bp->gunzip_buf = NULL;
5229 }
5230}
5231
5232static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5233{
5234 int n, rc;
5235
5236 /* check gzip header */
5237 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5238 return -EINVAL;
5239
5240 n = 10;
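/* Per RFC 1952 the fixed gzip header is 10 bytes: ID1 (0x1f),
 * ID2 (0x8b), CM (8 = deflate), FLG, MTIME (4 bytes), XFL and OS.
 * If the FNAME bit (0x08) is set in FLG, a zero-terminated original
 * file name follows, which the loop below skips over.
 */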
5241
34f80b04 5242#define FNAME 0x8
5243
5244 if (zbuf[3] & FNAME)
5245 while ((zbuf[n++] != 0) && (n < len));
5246
5247 bp->strm->next_in = zbuf + n;
5248 bp->strm->avail_in = len - n;
5249 bp->strm->next_out = bp->gunzip_buf;
5250 bp->strm->avail_out = FW_BUF_SIZE;
5251
5252 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5253 if (rc != Z_OK)
5254 return rc;
5255
5256 rc = zlib_inflate(bp->strm, Z_FINISH);
5257 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5258 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5259 bp->dev->name, bp->strm->msg);
5260
5261 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5262 if (bp->gunzip_outlen & 0x3)
5263 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5264 " gunzip_outlen (%d) not aligned\n",
5265 bp->dev->name, bp->gunzip_outlen);
5266 bp->gunzip_outlen >>= 2;
5267
5268 zlib_inflateEnd(bp->strm);
5269
5270 if (rc == Z_STREAM_END)
5271 return 0;
5272
5273 return rc;
5274}
5275
5276/* nic load/unload */
5277
5278/*
34f80b04 5279 * General service functions
5280 */
5281
5282/* send a NIG loopback debug packet */
5283static void bnx2x_lb_pckt(struct bnx2x *bp)
5284{
a2fbb9ea 5285 u32 wb_write[3];
5286
5287 /* Ethernet source and destination addresses */
5288 wb_write[0] = 0x55555555;
5289 wb_write[1] = 0x55555555;
34f80b04 5290 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 5291 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5292
5293 /* NON-IP protocol */
5294 wb_write[0] = 0x09000000;
5295 wb_write[1] = 0x55555555;
34f80b04 5296 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 5297 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5298}
5299
5300/* some of the internal memories
5301 * are not directly readable from the driver;
5302 * to test them we send debug packets
5303 */
5304static int bnx2x_int_mem_test(struct bnx2x *bp)
5305{
5306 int factor;
5307 int count, i;
5308 u32 val = 0;
5309
ad8d3948 5310 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 5311 factor = 120;
5312 else if (CHIP_REV_IS_EMUL(bp))
5313 factor = 200;
5314 else
a2fbb9ea 5315 factor = 1;
5316
5317 DP(NETIF_MSG_HW, "start part1\n");
5318
5319 /* Disable inputs of parser neighbor blocks */
5320 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5321 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5322 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5323 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5324
5325 /* Write 0 to parser credits for CFC search request */
5326 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5327
5328 /* send Ethernet packet */
5329 bnx2x_lb_pckt(bp);
5330
5331 /* TODO: do we need to reset the NIG statistics? */
5332 /* Wait until NIG register shows 1 packet of size 0x10 */
5333 count = 1000 * factor;
5334 while (count) {
34f80b04 5335
5336 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5337 val = *bnx2x_sp(bp, wb_data[0]);
5338 if (val == 0x10)
5339 break;
5340
5341 msleep(10);
5342 count--;
5343 }
5344 if (val != 0x10) {
5345 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5346 return -1;
5347 }
5348
5349 /* Wait until PRS register shows 1 packet */
5350 count = 1000 * factor;
5351 while (count) {
5352 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5353 if (val == 1)
5354 break;
5355
5356 msleep(10);
5357 count--;
5358 }
5359 if (val != 0x1) {
5360 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5361 return -2;
5362 }
5363
5364 /* Reset and init BRB, PRS */
34f80b04 5365 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 5366 msleep(50);
34f80b04 5367 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5368 msleep(50);
5369 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5370 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5371
5372 DP(NETIF_MSG_HW, "part2\n");
5373
5374 /* Disable inputs of parser neighbor blocks */
5375 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5376 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5377 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5378 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5379
5380 /* Write 0 to parser credits for CFC search request */
5381 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5382
5383 /* send 10 Ethernet packets */
5384 for (i = 0; i < 10; i++)
5385 bnx2x_lb_pckt(bp);
5386
5387 /* Wait until NIG register shows 10 + 1
5388 packets of size 11*0x10 = 0xb0 */
5389 count = 1000 * factor;
5390 while (count) {
34f80b04 5391
5392 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5393 val = *bnx2x_sp(bp, wb_data[0]);
5394 if (val == 0xb0)
5395 break;
5396
5397 msleep(10);
5398 count--;
5399 }
5400 if (val != 0xb0) {
5401 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5402 return -3;
5403 }
5404
5405 /* Wait until PRS register shows 2 packets */
5406 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5407 if (val != 2)
5408 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5409
5410 /* Write 1 to parser credits for CFC search request */
5411 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5412
5413 /* Wait until PRS register shows 3 packets */
5414 msleep(10 * factor);
5415 /* Wait until NIG register shows 1 packet of size 0x10 */
5416 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5417 if (val != 3)
5418 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5419
5420 /* clear NIG EOP FIFO */
5421 for (i = 0; i < 11; i++)
5422 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5423 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5424 if (val != 1) {
5425 BNX2X_ERR("clear of NIG failed\n");
5426 return -4;
5427 }
5428
5429 /* Reset and init BRB, PRS, NIG */
5430 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5431 msleep(50);
5432 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5433 msleep(50);
5434 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5435 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5436#ifndef BCM_ISCSI
5437 /* set NIC mode */
5438 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5439#endif
5440
5441 /* Enable inputs of parser neighbor blocks */
5442 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5443 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5444 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5445 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5446
5447 DP(NETIF_MSG_HW, "done\n");
5448
5449 return 0; /* OK */
5450}
5451
5452static void enable_blocks_attention(struct bnx2x *bp)
5453{
5454 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5455 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5456 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5457 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5458 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5459 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5460 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5461 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5462 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5463/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5464/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5465 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5466 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5467 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5468/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5469/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5470 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5471 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5472 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5473 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5474/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5475/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5476 if (CHIP_REV_IS_FPGA(bp))
5477 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5478 else
5479 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5480 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5481 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5482 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5483/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5484/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5485 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5486 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5487/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5488 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bits 3,4 masked */
5489}
5490
34f80b04 5491
5492static void bnx2x_reset_common(struct bnx2x *bp)
5493{
5494 /* reset_common */
5495 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5496 0xd3ffff7f);
5497 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5498}
5499
34f80b04 5500static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5501{
a2fbb9ea 5502 u32 val, i;
a2fbb9ea 5503
34f80b04 5504 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5505
81f75bbf 5506 bnx2x_reset_common(bp);
5507 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5508 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5509
5510 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5511 if (CHIP_IS_E1H(bp))
5512 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5513
5514 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5515 msleep(30);
5516 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5517
5518 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5519 if (CHIP_IS_E1(bp)) {
5520 /* enable HW interrupt from PXP on USDM overflow
5521 bit 16 on INT_MASK_0 */
5522 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5523 }
a2fbb9ea 5524
5525 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5526 bnx2x_init_pxp(bp);
5527
5528#ifdef __BIG_ENDIAN
5529 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5530 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5531 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5532 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5533 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5534 /* make sure this value is 0 */
5535 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5536
5537/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5538 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5539 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5540 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5541 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5542#endif
5543
34f80b04 5544 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 5545#ifdef BCM_ISCSI
5546 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5547 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5548 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5549#endif
5550
5551 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5552 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5553
5554 /* let the HW do its magic ... */
5555 msleep(100);
5556 /* finish PXP init */
5557 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5558 if (val != 1) {
5559 BNX2X_ERR("PXP2 CFG failed\n");
5560 return -EBUSY;
5561 }
5562 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5563 if (val != 1) {
5564 BNX2X_ERR("PXP2 RD_INIT failed\n");
5565 return -EBUSY;
5566 }
a2fbb9ea 5567
5568 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5569 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5570
34f80b04 5571 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
a2fbb9ea 5572
5573 /* clean the DMAE memory */
5574 bp->dmae_ready = 1;
5575 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5576
5577 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5578 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5579 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5580 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
a2fbb9ea 5581
5582 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5583 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5584 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5585 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5586
5587 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5588 /* soft reset pulse */
5589 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5590 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5591
5592#ifdef BCM_ISCSI
34f80b04 5593 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
a2fbb9ea 5594#endif
a2fbb9ea 5595
5596 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5597 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5598 if (!CHIP_REV_IS_SLOW(bp)) {
5599 /* enable hw interrupt from doorbell Q */
5600 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5601 }
a2fbb9ea 5602
34f80b04 5603 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
34f80b04 5604 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
26c8fa4d 5605 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5606 /* set NIC mode */
5607 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5608 if (CHIP_IS_E1H(bp))
5609 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 5610
5611 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5612 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5613 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5614 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
a2fbb9ea 5615
5616 if (CHIP_IS_E1H(bp)) {
5617 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5618 STORM_INTMEM_SIZE_E1H/2);
5619 bnx2x_init_fill(bp,
5620 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5621 0, STORM_INTMEM_SIZE_E1H/2);
5622 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5623 STORM_INTMEM_SIZE_E1H/2);
5624 bnx2x_init_fill(bp,
5625 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5626 0, STORM_INTMEM_SIZE_E1H/2);
5627 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5628 STORM_INTMEM_SIZE_E1H/2);
5629 bnx2x_init_fill(bp,
5630 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5631 0, STORM_INTMEM_SIZE_E1H/2);
5632 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5633 STORM_INTMEM_SIZE_E1H/2);
5634 bnx2x_init_fill(bp,
5635 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5636 0, STORM_INTMEM_SIZE_E1H/2);
5637 } else { /* E1 */
5638 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5639 STORM_INTMEM_SIZE_E1);
5640 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5641 STORM_INTMEM_SIZE_E1);
5642 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5643 STORM_INTMEM_SIZE_E1);
5644 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5645 STORM_INTMEM_SIZE_E1);
34f80b04 5646 }
a2fbb9ea 5647
5648 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5649 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5650 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5651 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
a2fbb9ea 5652
5653 /* sync semi rtc */
5654 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5655 0x80000000);
5656 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5657 0x80000000);
a2fbb9ea 5658
5659 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5660 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5661 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
a2fbb9ea 5662
5663 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5664 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5665 REG_WR(bp, i, 0xc0cac01a);
5666 /* TODO: replace with something meaningful */
5667 }
8d9c5f34 5668 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
34f80b04 5669 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5670
5671 if (sizeof(union cdu_context) != 1024)
5672 /* we currently assume that a context is 1024 bytes */
5673 printk(KERN_ALERT PFX "please adjust the size of"
5674 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 5675
5676 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5677 val = (4 << 24) + (0 << 12) + 1024;
5678 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5679 if (CHIP_IS_E1(bp)) {
5680 /* !!! fix pxp client credit until excel update */
5681 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5682 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5683 }
a2fbb9ea 5684
5685 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5686 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5687 /* enable context validation interrupt from CFC */
5688 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5689
5690 /* set the thresholds to prevent CFC/CDU race */
5691 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 5692
5693 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5694 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
a2fbb9ea 5695
5696 /* PXPCS COMMON comes here */
5697 /* Reset PCIE errors for debug */
5698 REG_WR(bp, 0x2814, 0xffffffff);
5699 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5700
5701 /* EMAC0 COMMON comes here */
5702 /* EMAC1 COMMON comes here */
5703 /* DBU COMMON comes here */
5704 /* DBG COMMON comes here */
5705
5706 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5707 if (CHIP_IS_E1H(bp)) {
5708 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5709 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5710 }
5711
5712 if (CHIP_REV_IS_SLOW(bp))
5713 msleep(200);
5714
5715 /* finish CFC init */
5716 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5717 if (val != 1) {
5718 BNX2X_ERR("CFC LL_INIT failed\n");
5719 return -EBUSY;
5720 }
5721 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5722 if (val != 1) {
5723 BNX2X_ERR("CFC AC_INIT failed\n");
5724 return -EBUSY;
5725 }
5726 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5727 if (val != 1) {
5728 BNX2X_ERR("CFC CAM_INIT failed\n");
5729 return -EBUSY;
5730 }
5731 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5732
34f80b04
EG
5733 /* read NIG statistic
5734 to see if this is our first up since powerup */
5735 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5736 val = *bnx2x_sp(bp, wb_data[0]);
5737
5738 /* do internal memory self test */
5739 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5740 BNX2X_ERR("internal mem self test failed\n");
5741 return -EBUSY;
5742 }
5743
35b19ba5 5744 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5745 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5746 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5747 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5748 bp->port.need_hw_lock = 1;
5749 break;
5750
35b19ba5 5751 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5752 /* Fan failure is indicated by SPIO 5 */
5753 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5754 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5755
5756 /* set to active low mode */
5757 val = REG_RD(bp, MISC_REG_SPIO_INT);
5758 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
f1410647 5759 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
34f80b04 5760 REG_WR(bp, MISC_REG_SPIO_INT, val);
f1410647 5761
5762 /* enable interrupt to signal the IGU */
5763 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5764 val |= (1 << MISC_REGISTERS_SPIO_5);
5765 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5766 break;
f1410647 5767
5768 default:
5769 break;
5770 }
f1410647 5771
5772 /* clear PXP2 attentions */
5773 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5774
34f80b04 5775 enable_blocks_attention(bp);
a2fbb9ea 5776
5777 if (!BP_NOMCP(bp)) {
5778 bnx2x_acquire_phy_lock(bp);
5779 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5780 bnx2x_release_phy_lock(bp);
5781 } else
5782 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5783
5784 return 0;
5785}
a2fbb9ea 5786
5787static int bnx2x_init_port(struct bnx2x *bp)
5788{
5789 int port = BP_PORT(bp);
1c06328c 5790 u32 low, high;
34f80b04 5791 u32 val;
a2fbb9ea 5792
5793 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5794
5795 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5796
5797 /* Port PXP comes here */
5798 /* Port PXP2 comes here */
5799#ifdef BCM_ISCSI
5800 /* Port0 1
5801 * Port1 385 */
5802 i++;
5803 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5804 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5805 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5806 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5807
5808 /* Port0 2
5809 * Port1 386 */
5810 i++;
5811 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5812 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5813 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5814 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5815
5816 /* Port0 3
5817 * Port1 387 */
5818 i++;
5819 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5820 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5821 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5822 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5823#endif
34f80b04 5824 /* Port CMs come here */
5825 bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
5826 (port ? XCM_PORT1_END : XCM_PORT0_END));
5827
5828 /* Port QM comes here */
5829#ifdef BCM_ISCSI
5830 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5831 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5832
5833 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5834 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5835#endif
5836 /* Port DQ comes here */
5837
5838 bnx2x_init_block(bp, (port ? BRB1_PORT1_START : BRB1_PORT0_START),
5839 (port ? BRB1_PORT1_END : BRB1_PORT0_END));
5840 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5841 /* no pause for emulation and FPGA */
5842 low = 0;
5843 high = 513;
5844 } else {
5845 if (IS_E1HMF(bp))
5846 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5847 else if (bp->dev->mtu > 4096) {
5848 if (bp->flags & ONE_PORT_FLAG)
5849 low = 160;
5850 else {
5851 val = bp->dev->mtu;
5852 /* (24*1024 + val*4)/256 */
5853 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5854 }
5855 } else
5856 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5857 high = low + 56; /* 14*1024/256 */
5858 }
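/* Worked example (derived from the formula above): on a two-port
 * single-function board with mtu 9000, low = 96 + 9000/64 + 1 = 237
 * and high = 237 + 56 = 293, i.e. roughly (24KB + 4*mtu)/256 with a
 * fixed 14KB/256 gap above it.
 */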
5859 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5860 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5861
5862
ad8d3948 5863 /* Port PRS comes here */
5864 /* Port TSDM comes here */
5865 /* Port CSDM comes here */
5866 /* Port USDM comes here */
5867 /* Port XSDM comes here */
356e2385 5868
5869 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5870 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5871 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5872 port ? USEM_PORT1_END : USEM_PORT0_END);
5873 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5874 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5875 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5876 port ? XSEM_PORT1_END : XSEM_PORT0_END);
356e2385 5877
a2fbb9ea 5878 /* Port UPB comes here */
5879 /* Port XPB comes here */
5880
5881 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5882 port ? PBF_PORT1_END : PBF_PORT0_END);
5883
5884 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 5885 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5886
5887 /* update threshold */
34f80b04 5888 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 5889 /* update init credit */
34f80b04 5890 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5891
5892 /* probe changes */
34f80b04 5893 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 5894 msleep(5);
34f80b04 5895 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5896
5897#ifdef BCM_ISCSI
5898 /* tell the searcher where the T2 table is */
5899 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5900
5901 wb_write[0] = U64_LO(bp->t2_mapping);
5902 wb_write[1] = U64_HI(bp->t2_mapping);
5903 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5904 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5905 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5906 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5907
5908 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5909 /* Port SRCH comes here */
5910#endif
5911 /* Port CDU comes here */
5912 /* Port CFC comes here */
5913
5914 if (CHIP_IS_E1(bp)) {
5915 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5916 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5917 }
5918 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5919 port ? HC_PORT1_END : HC_PORT0_END);
5920
5921 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
a2fbb9ea 5922 MISC_AEU_PORT0_START,
5923 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5924 /* init aeu_mask_attn_func_0/1:
5925 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5926 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5927 * bits 4-7 are used for "per vn group attention" */
5928 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5929 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5930
5931 /* Port PXPCS comes here */
5932 /* Port EMAC0 comes here */
5933 /* Port EMAC1 comes here */
5934 /* Port DBU comes here */
5935 /* Port DBG comes here */
356e2385 5936
5937 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5938 port ? NIG_PORT1_END : NIG_PORT0_END);
5939
5940 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5941
5942 if (CHIP_IS_E1H(bp)) {
5943 /* 0x2 disable e1hov, 0x1 enable */
5944 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5945 (IS_E1HMF(bp) ? 0x1 : 0x2));
5946
5947 /* support pause requests from USDM, TSDM and BRB */
5948 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
5949
5950 {
5951 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5952 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5953 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5954 }
5955 }
5956
5957 /* Port MCP comes here */
5958 /* Port DMAE comes here */
5959
35b19ba5 5960 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5961 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5962 {
5963 u32 swap_val, swap_override, aeu_gpio_mask, offset;
5964
5965 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
5966 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
5967
5968 /* The GPIO should be swapped if the swap register is
5969 set and active */
5970 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5971 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5972
5973 /* Select function upon port-swap configuration */
5974 if (port == 0) {
5975 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
5976 aeu_gpio_mask = (swap_val && swap_override) ?
5977 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
5978 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
5979 } else {
5980 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
5981 aeu_gpio_mask = (swap_val && swap_override) ?
5982 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
5983 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
5984 }
5985 val = REG_RD(bp, offset);
5986 /* add GPIO3 to group */
5987 val |= aeu_gpio_mask;
5988 REG_WR(bp, offset, val);
5989 }
5990 break;
5991
35b19ba5 5992 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5993 /* add SPIO 5 to group 0 */
5994 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5995 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5996 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5997 break;
5998
5999 default:
6000 break;
6001 }
6002
c18487ee 6003 bnx2x__link_reset(bp);
a2fbb9ea 6004
6005 return 0;
6006}
6007
6008#define ILT_PER_FUNC (768/2)
6009#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6010/* the phys address is shifted right 12 bits and a 1=valid bit
6011 is added as the 53rd bit;
6012 then, since this is a wide register(TM),
6013 we split it into two 32 bit writes
6014 */
6015#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6016#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6017#define PXP_ONE_ILT(x) (((x) << 10) | x)
6018#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
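/* Worked example (assumed address, not from the original source): for
 * addr = 0x0001234567890000, ONCHIP_ADDR1() gives 0x34567890 (the low
 * 32 bits of addr >> 12) and ONCHIP_ADDR2() gives 0x100012 - bits
 * 63..44 of addr OR'ed with the valid bit, which sits at bit 20 of the
 * high dword (bit 52 of the shifted address, i.e. the 53rd bit).
 */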
6019
6020#define CNIC_ILT_LINES 0
6021
6022static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6023{
6024 int reg;
6025
6026 if (CHIP_IS_E1H(bp))
6027 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6028 else /* E1 */
6029 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6030
6031 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6032}
6033
6034static int bnx2x_init_func(struct bnx2x *bp)
6035{
6036 int port = BP_PORT(bp);
6037 int func = BP_FUNC(bp);
8badd27a 6038 u32 addr, val;
6039 int i;
6040
6041 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6042
6043 /* set MSI reconfigure capability */
6044 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6045 val = REG_RD(bp, addr);
6046 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6047 REG_WR(bp, addr, val);
6048
6049 i = FUNC_ILT_BASE(func);
6050
6051 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6052 if (CHIP_IS_E1H(bp)) {
6053 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6054 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6055 } else /* E1 */
6056 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6057 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6058
6059
6060 if (CHIP_IS_E1H(bp)) {
6061 for (i = 0; i < 9; i++)
6062 bnx2x_init_block(bp,
6063 cm_start[func][i], cm_end[func][i]);
6064
6065 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6066 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6067 }
6068
6069 /* HC init per function */
6070 if (CHIP_IS_E1H(bp)) {
6071 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6072
6073 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6074 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6075 }
6076 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
6077
c14423fe 6078 /* Reset PCIE errors for debug */
6079 REG_WR(bp, 0x2114, 0xffffffff);
6080 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 6081
6082 return 0;
6083}
6084
6085static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6086{
6087 int i, rc = 0;
a2fbb9ea 6088
6089 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6090 BP_FUNC(bp), load_code);
a2fbb9ea 6091
6092 bp->dmae_ready = 0;
6093 mutex_init(&bp->dmae_mutex);
6094 bnx2x_gunzip_init(bp);
a2fbb9ea 6095
6096 switch (load_code) {
6097 case FW_MSG_CODE_DRV_LOAD_COMMON:
6098 rc = bnx2x_init_common(bp);
6099 if (rc)
6100 goto init_hw_err;
6101 /* no break */
6102
6103 case FW_MSG_CODE_DRV_LOAD_PORT:
6104 bp->dmae_ready = 1;
6105 rc = bnx2x_init_port(bp);
6106 if (rc)
6107 goto init_hw_err;
6108 /* no break */
6109
6110 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6111 bp->dmae_ready = 1;
6112 rc = bnx2x_init_func(bp);
6113 if (rc)
6114 goto init_hw_err;
6115 break;
6116
6117 default:
6118 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6119 break;
6120 }
6121
6122 if (!BP_NOMCP(bp)) {
6123 int func = BP_FUNC(bp);
6124
6125 bp->fw_drv_pulse_wr_seq =
34f80b04 6126 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 6127 DRV_PULSE_SEQ_MASK);
6128 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6129 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
6130 bp->fw_drv_pulse_wr_seq, bp->func_stx);
6131 } else
6132 bp->func_stx = 0;
a2fbb9ea 6133
6134 /* this needs to be done before gunzip end */
6135 bnx2x_zero_def_sb(bp);
6136 for_each_queue(bp, i)
6137 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6138
6139init_hw_err:
6140 bnx2x_gunzip_end(bp);
6141
6142 return rc;
6143}
6144
c14423fe 6145/* send the MCP a request, block until there is a reply */
6146static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6147{
34f80b04 6148 int func = BP_FUNC(bp);
6149 u32 seq = ++bp->fw_seq;
6150 u32 rc = 0;
6151 u32 cnt = 1;
6152 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
a2fbb9ea 6153
34f80b04 6154 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
f1410647 6155 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea 6156
6157 do {
6158 /* let the FW do its magic ... */
6159 msleep(delay);
a2fbb9ea 6160
19680c48 6161 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
a2fbb9ea 6162
6163 /* Give the FW up to 2 seconds (200*10ms) */
6164 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6165
6166 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6167 cnt*delay, rc, seq);
6168
6169 /* is this a reply to our command? */
6170 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6171 rc &= FW_MSG_CODE_MASK;
f1410647 6172
6173 } else {
6174 /* FW BUG! */
6175 BNX2X_ERR("FW failed to respond!\n");
6176 bnx2x_fw_dump(bp);
6177 rc = 0;
6178 }
f1410647 6179
6180 return rc;
6181}
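/* The handshake above in short: the driver writes (command | seq) to
 * drv_mb_header, then polls fw_mb_header in 10ms steps (100ms on slow
 * emulation/FPGA chips) for up to ~2 seconds until the firmware echoes
 * the same sequence number; the reply is then masked down to
 * FW_MSG_CODE_MASK, and a missing echo is reported as a firmware bug.
 */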
6182
6183static void bnx2x_free_mem(struct bnx2x *bp)
6184{
6185
6186#define BNX2X_PCI_FREE(x, y, size) \
6187 do { \
6188 if (x) { \
6189 pci_free_consistent(bp->pdev, size, x, y); \
6190 x = NULL; \
6191 y = 0; \
6192 } \
6193 } while (0)
6194
6195#define BNX2X_FREE(x) \
6196 do { \
6197 if (x) { \
6198 vfree(x); \
6199 x = NULL; \
6200 } \
6201 } while (0)
6202
6203 int i;
6204
6205 /* fastpath */
555f6c78 6206 /* Common */
6207 for_each_queue(bp, i) {
6208
555f6c78 6209 /* status blocks */
6210 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6211 bnx2x_fp(bp, i, status_blk_mapping),
6212 sizeof(struct host_status_block) +
6213 sizeof(struct eth_tx_db_data));
6214 }
6215 /* Rx */
6216 for_each_rx_queue(bp, i) {
a2fbb9ea 6217
555f6c78 6218 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6219 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6220 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6221 bnx2x_fp(bp, i, rx_desc_mapping),
6222 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6223
6224 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6225 bnx2x_fp(bp, i, rx_comp_mapping),
6226 sizeof(struct eth_fast_path_rx_cqe) *
6227 NUM_RCQ_BD);
a2fbb9ea 6228
7a9b2557 6229 /* SGE ring */
32626230 6230 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6231 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6232 bnx2x_fp(bp, i, rx_sge_mapping),
6233 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6234 }
6235 /* Tx */
6236 for_each_tx_queue(bp, i) {
6237
6238 /* fastpath tx rings: tx_buf tx_desc */
6239 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6240 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6241 bnx2x_fp(bp, i, tx_desc_mapping),
6242 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6243 }
6244 /* end of fastpath */
6245
6246 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 6247 sizeof(struct host_def_status_block));
6248
6249 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 6250 sizeof(struct bnx2x_slowpath));
6251
6252#ifdef BCM_ISCSI
6253 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6254 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6255 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6256 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6257#endif
7a9b2557 6258 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6259
6260#undef BNX2X_PCI_FREE
6261#undef BNX2X_FREE
6262}
6263
6264static int bnx2x_alloc_mem(struct bnx2x *bp)
6265{
6266
6267#define BNX2X_PCI_ALLOC(x, y, size) \
6268 do { \
6269 x = pci_alloc_consistent(bp->pdev, size, y); \
6270 if (x == NULL) \
6271 goto alloc_mem_err; \
6272 memset(x, 0, size); \
6273 } while (0)
6274
6275#define BNX2X_ALLOC(x, size) \
6276 do { \
6277 x = vmalloc(size); \
6278 if (x == NULL) \
6279 goto alloc_mem_err; \
6280 memset(x, 0, size); \
6281 } while (0)
6282
6283 int i;
6284
6285 /* fastpath */
555f6c78 6286 /* Common */
6287 for_each_queue(bp, i) {
6288 bnx2x_fp(bp, i, bp) = bp;
6289
555f6c78 6290 /* status blocks */
6291 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6292 &bnx2x_fp(bp, i, status_blk_mapping),
6293 sizeof(struct host_status_block) +
6294 sizeof(struct eth_tx_db_data));
6295 }
6296 /* Rx */
6297 for_each_rx_queue(bp, i) {
a2fbb9ea 6298
555f6c78 6299 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6300 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6301 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6302 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6303 &bnx2x_fp(bp, i, rx_desc_mapping),
6304 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6305
6306 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6307 &bnx2x_fp(bp, i, rx_comp_mapping),
6308 sizeof(struct eth_fast_path_rx_cqe) *
6309 NUM_RCQ_BD);
6310
6311 /* SGE ring */
6312 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6313 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6314 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6315 &bnx2x_fp(bp, i, rx_sge_mapping),
6316 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea 6317 }
6318 /* Tx */
6319 for_each_tx_queue(bp, i) {
6320
6321 bnx2x_fp(bp, i, hw_tx_prods) =
6322 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6323
6324 bnx2x_fp(bp, i, tx_prods_mapping) =
6325 bnx2x_fp(bp, i, status_blk_mapping) +
6326 sizeof(struct host_status_block);
6327
6328 /* fastpath tx rings: tx_buf tx_desc */
6329 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6330 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6331 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6332 &bnx2x_fp(bp, i, tx_desc_mapping),
6333 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6334 }
6335 /* end of fastpath */
6336
6337 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6338 sizeof(struct host_def_status_block));
6339
6340 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6341 sizeof(struct bnx2x_slowpath));
6342
6343#ifdef BCM_ISCSI
6344 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6345
6346 /* Initialize T1 */
6347 for (i = 0; i < 64*1024; i += 64) {
6348 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6349 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6350 }
6351
6352 /* allocate searcher T2 table
6353 we allocate 1/4 of alloc num for T2
6354 (which is not entered into the ILT) */
6355 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6356
6357 /* Initialize T2 */
6358 for (i = 0; i < 16*1024; i += 64)
6359 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6360
c14423fe 6361 /* now fixup the last line in the block to point to the next block */
6362 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6363
6364 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6365 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6366
6367 /* QM queues (128*MAX_CONN) */
6368 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6369#endif
6370
6371 /* Slow path ring */
6372 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6373
6374 return 0;
6375
6376alloc_mem_err:
6377 bnx2x_free_mem(bp);
6378 return -ENOMEM;
6379
6380#undef BNX2X_PCI_ALLOC
6381#undef BNX2X_ALLOC
6382}
6383
6384static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6385{
6386 int i;
6387
555f6c78 6388 for_each_tx_queue(bp, i) {
6389 struct bnx2x_fastpath *fp = &bp->fp[i];
6390
6391 u16 bd_cons = fp->tx_bd_cons;
6392 u16 sw_prod = fp->tx_pkt_prod;
6393 u16 sw_cons = fp->tx_pkt_cons;
6394
6395 while (sw_cons != sw_prod) {
6396 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6397 sw_cons++;
6398 }
6399 }
6400}
6401
6402static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6403{
6404 int i, j;
6405
555f6c78 6406 for_each_rx_queue(bp, j) {
6407 struct bnx2x_fastpath *fp = &bp->fp[j];
6408
6409 for (i = 0; i < NUM_RX_BD; i++) {
6410 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6411 struct sk_buff *skb = rx_buf->skb;
6412
6413 if (skb == NULL)
6414 continue;
6415
6416 pci_unmap_single(bp->pdev,
6417 pci_unmap_addr(rx_buf, mapping),
356e2385 6418 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6419
6420 rx_buf->skb = NULL;
6421 dev_kfree_skb(skb);
6422 }
7a9b2557 6423 if (!fp->disable_tpa)
6424 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6425 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 6426 ETH_MAX_AGGREGATION_QUEUES_E1H);
6427 }
6428}
6429
6430static void bnx2x_free_skbs(struct bnx2x *bp)
6431{
6432 bnx2x_free_tx_skbs(bp);
6433 bnx2x_free_rx_skbs(bp);
6434}
6435
6436static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6437{
34f80b04 6438 int i, offset = 1;
6439
6440 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 6441 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6442 bp->msix_table[0].vector);
6443
6444 for_each_queue(bp, i) {
c14423fe 6445 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 6446 "state %x\n", i, bp->msix_table[i + offset].vector,
6447 bnx2x_fp(bp, i, state));
6448
34f80b04 6449 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 6450 }
6451}
6452
6453static void bnx2x_free_irq(struct bnx2x *bp)
6454{
a2fbb9ea 6455 if (bp->flags & USING_MSIX_FLAG) {
6456 bnx2x_free_msix_irqs(bp);
6457 pci_disable_msix(bp->pdev);
6458 bp->flags &= ~USING_MSIX_FLAG;
6459
6460 } else if (bp->flags & USING_MSI_FLAG) {
6461 free_irq(bp->pdev->irq, bp->dev);
6462 pci_disable_msi(bp->pdev);
6463 bp->flags &= ~USING_MSI_FLAG;
6464
6465 } else
6466 free_irq(bp->pdev->irq, bp->dev);
6467}
6468
6469static int bnx2x_enable_msix(struct bnx2x *bp)
6470{
6471 int i, rc, offset = 1;
6472 int igu_vec = 0;
a2fbb9ea 6473
6474 bp->msix_table[0].entry = igu_vec;
6475 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
a2fbb9ea 6476
34f80b04 6477 for_each_queue(bp, i) {
8badd27a 6478 igu_vec = BP_L_ID(bp) + offset + i;
6479 bp->msix_table[i + offset].entry = igu_vec;
6480 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6481 "(fastpath #%u)\n", i + offset, igu_vec, i);
6482 }
6483
34f80b04 6484 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
555f6c78 6485 BNX2X_NUM_QUEUES(bp) + offset);
34f80b04 6486 if (rc) {
6487 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6488 return rc;
34f80b04 6489 }
8badd27a 6490
6491 bp->flags |= USING_MSIX_FLAG;
6492
6493 return 0;
6494}
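/* Resulting vector layout (as programmed above): msix_table[0] is the
 * slowpath status block vector, and entry i + 1 maps fastpath queue i
 * to IGU vector BP_L_ID(bp) + 1 + i; bnx2x_req_msix_irqs() below
 * requests the handlers with the same offset of 1.
 */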
6495
6496static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6497{
34f80b04 6498 int i, rc, offset = 1;
a2fbb9ea 6499
6500 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6501 bp->dev->name, bp->dev);
6502 if (rc) {
6503 BNX2X_ERR("request sp irq failed\n");
6504 return -EBUSY;
6505 }
6506
6507 for_each_queue(bp, i) {
6508 struct bnx2x_fastpath *fp = &bp->fp[i];
6509
6510 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
34f80b04 6511 rc = request_irq(bp->msix_table[i + offset].vector,
555f6c78 6512 bnx2x_msix_fp_int, 0, fp->name, fp);
a2fbb9ea 6513 if (rc) {
555f6c78 6514 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
6515 bnx2x_free_msix_irqs(bp);
6516 return -EBUSY;
6517 }
6518
555f6c78 6519 fp->state = BNX2X_FP_STATE_IRQ;
6520 }
6521
6522 i = BNX2X_NUM_QUEUES(bp);
6523 if (is_multi(bp))
6524 printk(KERN_INFO PFX
6525 "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
6526 bp->dev->name, bp->msix_table[0].vector,
6527 bp->msix_table[offset].vector,
6528 bp->msix_table[offset + i - 1].vector);
6529 else
6530 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
6531 bp->dev->name, bp->msix_table[0].vector,
6532 bp->msix_table[offset + i - 1].vector);
6533
a2fbb9ea 6534 return 0;
6535}
6536
6537static int bnx2x_enable_msi(struct bnx2x *bp)
6538{
6539 int rc;
6540
6541 rc = pci_enable_msi(bp->pdev);
6542 if (rc) {
6543 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6544 return -1;
6545 }
6546 bp->flags |= USING_MSI_FLAG;
6547
6548 return 0;
6549}
6550
6551static int bnx2x_req_irq(struct bnx2x *bp)
6552{
8badd27a 6553 unsigned long flags;
34f80b04 6554 int rc;
a2fbb9ea 6555
6556 if (bp->flags & USING_MSI_FLAG)
6557 flags = 0;
6558 else
6559 flags = IRQF_SHARED;
6560
6561 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
34f80b04 6562 bp->dev->name, bp->dev);
6563 if (!rc)
6564 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6565
6566 return rc;
6567}
6568
6569static void bnx2x_napi_enable(struct bnx2x *bp)
6570{
6571 int i;
6572
555f6c78 6573 for_each_rx_queue(bp, i)
6574 napi_enable(&bnx2x_fp(bp, i, napi));
6575}
6576
6577static void bnx2x_napi_disable(struct bnx2x *bp)
6578{
6579 int i;
6580
555f6c78 6581 for_each_rx_queue(bp, i)
6582 napi_disable(&bnx2x_fp(bp, i, napi));
6583}
6584
6585static void bnx2x_netif_start(struct bnx2x *bp)
6586{
6587 if (atomic_dec_and_test(&bp->intr_sem)) {
6588 if (netif_running(bp->dev)) {
6589 bnx2x_napi_enable(bp);
6590 bnx2x_int_enable(bp);
6591 if (bp->state == BNX2X_STATE_OPEN)
6592 netif_tx_wake_all_queues(bp->dev);
6593 }
6594 }
6595}
6596
f8ef6e44 6597static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 6598{
f8ef6e44 6599 bnx2x_int_disable_sync(bp, disable_hw);
e94d8af3 6600 bnx2x_napi_disable(bp);
65abd74d 6601 if (netif_running(bp->dev)) {
6602 netif_tx_disable(bp->dev);
6603 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6604 }
6605}
6606
6607/*
6608 * Init service functions
6609 */
6610
3101c2bc 6611static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6612{
6613 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 6614 int port = BP_PORT(bp);
6615
6616 /* CAM allocation
6617 * unicasts 0-31:port0 32-63:port1
6618 * multicast 64-127:port0 128-191:port1
6619 */
8d9c5f34 6620 config->hdr.length = 2;
af246401 6621 config->hdr.offset = port ? 32 : 0;
0626b899 6622 config->hdr.client_id = bp->fp->cl_id;
6623 config->hdr.reserved1 = 0;
6624
6625 /* primary MAC */
6626 config->config_table[0].cam_entry.msb_mac_addr =
6627 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6628 config->config_table[0].cam_entry.middle_mac_addr =
6629 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6630 config->config_table[0].cam_entry.lsb_mac_addr =
6631 swab16(*(u16 *)&bp->dev->dev_addr[4]);
34f80b04 6632 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6633 if (set)
6634 config->config_table[0].target_table_entry.flags = 0;
6635 else
6636 CAM_INVALIDATE(config->config_table[0]);
6637 config->config_table[0].target_table_entry.client_id = 0;
6638 config->config_table[0].target_table_entry.vlan_id = 0;
6639
6640 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6641 (set ? "setting" : "clearing"),
6642 config->config_table[0].cam_entry.msb_mac_addr,
6643 config->config_table[0].cam_entry.middle_mac_addr,
6644 config->config_table[0].cam_entry.lsb_mac_addr);
6645
6646 /* broadcast */
6647 config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
6648 config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
6649 config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
34f80b04 6650 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6651 if (set)
6652 config->config_table[1].target_table_entry.flags =
a2fbb9ea 6653 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6654 else
6655 CAM_INVALIDATE(config->config_table[1]);
6656 config->config_table[1].target_table_entry.client_id = 0;
6657 config->config_table[1].target_table_entry.vlan_id = 0;
6658
6659 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6660 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6661 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6662}
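/* Worked example (assumed MAC, not from the original source): on a
 * little-endian host dev_addr 00:11:22:33:44:55 loads as the u16s
 * 0x1100, 0x3322 and 0x5544, and swab16() converts them to the CAM's
 * big-endian halfwords 0x0011, 0x2233 and 0x4455.
 */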
6663
3101c2bc 6664static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6665{
6666 struct mac_configuration_cmd_e1h *config =
6667 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6668
3101c2bc 6669 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6670 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6671 return;
6672 }
6673
6674 /* CAM allocation for E1H
6675 * unicasts: by func number
6676 * multicast: 20+FUNC*20, 20 each
6677 */
8d9c5f34 6678 config->hdr.length = 1;
34f80b04 6679 config->hdr.offset = BP_FUNC(bp);
0626b899 6680 config->hdr.client_id = bp->fp->cl_id;
34f80b04
EG
6681 config->hdr.reserved1 = 0;
6682
6683 /* primary MAC */
6684 config->config_table[0].msb_mac_addr =
6685 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6686 config->config_table[0].middle_mac_addr =
6687 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6688 config->config_table[0].lsb_mac_addr =
6689 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6690 config->config_table[0].client_id = BP_L_ID(bp);
6691 config->config_table[0].vlan_id = 0;
6692 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
3101c2bc
YG
6693 if (set)
6694 config->config_table[0].flags = BP_PORT(bp);
6695 else
6696 config->config_table[0].flags =
6697 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 6698
3101c2bc
YG
6699 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6700 (set ? "setting" : "clearing"),
34f80b04
EG
6701 config->config_table[0].msb_mac_addr,
6702 config->config_table[0].middle_mac_addr,
6703 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6704
6705 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6706 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6707 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6708}
6709
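/* Poll *state_p until it reaches the expected completion state (up to
 * roughly 5 seconds).  Callers that cannot rely on the interrupt path
 * pass poll != 0 so the rx rings are serviced by hand and the ramrod
 * completions are reaped synchronously.
 */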
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

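/* Open the leading connection: reset its IGU state, post the
 * PORT_SETUP ramrod and wait for bp->state to reach OPEN.
 */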
static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

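/* Same sequence for a non-default (RSS) queue, tracked through the
 * per-queue fastpath state instead of the global bp->state.
 */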
static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}

static int bnx2x_poll(struct napi_struct *napi, int budget);

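/* Pick the interrupt mode and queue count: INT#x and MSI force a
 * single queue pair, while MSI-X allows one queue per online CPU
 * (bounded by the HW maximum) and falls back to a single queue if
 * enabling MSI-X fails.
 */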
static void bnx2x_set_int_mode(struct bnx2x *bp)
{
	int num_queues;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP,
		   "set number of queues to %d\n", num_queues);
		break;

	case INT_MODE_MSIX:
	default:
		if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
			num_queues = min_t(u32, num_online_cpus(),
					   BNX2X_MAX_QUEUES(bp));
		else
			num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
		   "  number of tx queues to %d\n",
		   bp->num_rx_queues, bp->num_tx_queues);
		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		if (bnx2x_enable_msix(bp)) {
			/* failed to enable MSI-X */
			num_queues = 1;
			bp->num_rx_queues = num_queues;
			bp->num_tx_queues = num_queues;
			if (bp->multi_mode)
				BNX2X_ERR("Multi requested but failed to "
					  "enable MSI-X  set number of "
					  "queues to %d\n", num_queues);
		}
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_tx_queues;
}

static void bnx2x_set_rx_mode(struct net_device *dev);

/* must be called with rtnl_lock */
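/* Bring-up order: allocate rings, attach NAPI, request IRQs, run the
 * LOAD_REQ/LOAD_DONE handshake with the MCP (or emulate it through
 * load_count when no MCP is present), init the HW, then open the
 * leading and RSS connections and program the MAC.
 */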
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc = 0;
#ifdef BNX2X_STOP_ON_ERROR
	DP(NETIF_MSG_IFUP, "enter  load_mode %d\n", load_mode);
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	bnx2x_set_int_mode(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

#ifdef BNX2X_STOP_ON_ERROR
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->poll_no_work = 0;
		fp->poll_calls = 0;
		fp->poll_max_calls = 0;
		fp->poll_complete = 0;
		fp->poll_exit = 0;
	}
#endif
	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error1;
		}
	} else {
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
			if (bp->flags & USING_MSI_FLAG)
				pci_disable_msi(bp->pdev);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
			       bp->dev->name, bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		goto load_error3;
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;
		}

	if (bp->state == BNX2X_STATE_OPEN)
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
				goto load_error3;
		}

	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1(bp, 1);
	else
		bnx2x_set_mac_addr_e1h(bp, 1);

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should be only reenabled */
		netif_tx_wake_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

	return 0;

load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}

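/* Tear down one RSS connection: HALT ramrod, wait for HALTED, then
 * delete the CFC entry and wait for the queue to reach CLOSED.
 */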
static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}

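/* The leading connection also owns the port: once the HALT completes,
 * a PORT_DELETE ramrod is posted and its completion is detected by
 * watching the default status block producer.
 */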
static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}

static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty  %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}

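/* Unload order mirrors bnx2x_nic_load(): quiesce the rx mode and the
 * net device, drain the tx fastpath, clear the MACs (or arm WoL
 * entries), close all connections and reset the chip at the scope the
 * MCP grants (common/port/function).
 */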
/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp, 1000);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
	}

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG) {
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
		if (CHIP_IS_E1H(bp))
			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);

	} else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	 KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/* ethtool_ops */

/*
 * Init service functions
 */

static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
	case 1:	return PXP2_REG_PGL_PRETEND_FUNC_F1;
	case 2:	return PXP2_REG_PGL_PRETEND_FUNC_F2;
	case 3:	return PXP2_REG_PGL_PRETEND_FUNC_F3;
	case 4:	return PXP2_REG_PGL_PRETEND_FUNC_F4;
	case 5:	return PXP2_REG_PGL_PRETEND_FUNC_F5;
	case 6:	return PXP2_REG_PGL_PRETEND_FUNC_F6;
	case 7:	return PXP2_REG_PGL_PRETEND_FUNC_F7;
	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);
		return (u32)(-1);
	}
}

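/* Disable HC interrupts on behalf of the pre-boot UNDI driver, which
 * only knows function 0: temporarily program the PGL pretend register
 * so our GRC accesses are issued as function 0, then restore it.
 */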
static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (CHIP_IS_E1H(bp))
		bnx2x_undi_int_disable_e1h(bp, func);
	else
		bnx2x_int_disable(bp);
}

static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			     (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					    MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}

static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}

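/* Build the ethtool "supported" mask from the SerDes/XGXS external
 * PHY type found in NVRAM, then trim it against the NVRAM speed
 * capability mask.
 */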
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}

static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
		       "  advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}

static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	if (config & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
		       "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg = (bp->port.link_config &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
	bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
	bp->dev->dev_addr[5] = (u8)(val & 0xff);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}

static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {

			bp->e1hov = val;
			bp->e1hmf = 1;
			BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
				       "(0x%04x)\n",
				       func, bp->e1hov, bp->e1hov);
		} else {
			BNX2X_DEV_INFO("single function mode\n");
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!!  No valid E1HOV for func %d,"
					  " aborting\n", func);
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}

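/* One-time per-function init of the bp structure: HW info, work items,
 * interrupt/queue mode, TPA and coalescing defaults and the slow-path
 * timer.
 */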
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);

	mutex_init(&bp->port.phy_mutex);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		printk(KERN_ERR PFX
		      "Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	bp->tx_ticks = 50;
	bp->rx_ticks = 25;

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}

/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
	if (IS_E1HMF(bp)) {
		u16 vn_max_rate;

		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
			       FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->port.phy_addr;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}

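/* Validate the requested autoneg/speed/duplex against the supported
 * mask, record the result in link_params and, if the device is up,
 * restart the link state machine.
 */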
8318static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8319{
8320 struct bnx2x *bp = netdev_priv(dev);
8321 u32 advertising;
8322
34f80b04
EG
8323 if (IS_E1HMF(bp))
8324 return 0;
8325
a2fbb9ea
ET
8326 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8327 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8328 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8329 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8330 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8331 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8332 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8333
a2fbb9ea 8334 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
8335 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8336 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 8337 return -EINVAL;
f1410647 8338 }
a2fbb9ea
ET
8339
8340 /* advertise the requested speed and duplex if supported */
34f80b04 8341 cmd->advertising &= bp->port.supported;
a2fbb9ea 8342
c18487ee
YR
8343 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8344 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
8345 bp->port.advertising |= (ADVERTISED_Autoneg |
8346 cmd->advertising);
a2fbb9ea
ET
8347
8348 } else { /* forced speed */
8349 /* advertise the requested speed and duplex if supported */
8350 switch (cmd->speed) {
8351 case SPEED_10:
8352 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8353 if (!(bp->port.supported &
f1410647
ET
8354 SUPPORTED_10baseT_Full)) {
8355 DP(NETIF_MSG_LINK,
8356 "10M full not supported\n");
a2fbb9ea 8357 return -EINVAL;
f1410647 8358 }
a2fbb9ea
ET
8359
8360 advertising = (ADVERTISED_10baseT_Full |
8361 ADVERTISED_TP);
8362 } else {
34f80b04 8363 if (!(bp->port.supported &
f1410647
ET
8364 SUPPORTED_10baseT_Half)) {
8365 DP(NETIF_MSG_LINK,
8366 "10M half not supported\n");
a2fbb9ea 8367 return -EINVAL;
f1410647 8368 }
a2fbb9ea
ET
8369
8370 advertising = (ADVERTISED_10baseT_Half |
8371 ADVERTISED_TP);
8372 }
8373 break;
8374
8375 case SPEED_100:
8376 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8377 if (!(bp->port.supported &
f1410647
ET
8378 SUPPORTED_100baseT_Full)) {
8379 DP(NETIF_MSG_LINK,
8380 "100M full not supported\n");
a2fbb9ea 8381 return -EINVAL;
f1410647 8382 }
a2fbb9ea
ET
8383
8384 advertising = (ADVERTISED_100baseT_Full |
8385 ADVERTISED_TP);
8386 } else {
34f80b04 8387 if (!(bp->port.supported &
f1410647
ET
8388 SUPPORTED_100baseT_Half)) {
8389 DP(NETIF_MSG_LINK,
8390 "100M half not supported\n");
a2fbb9ea 8391 return -EINVAL;
f1410647 8392 }
a2fbb9ea
ET
8393
8394 advertising = (ADVERTISED_100baseT_Half |
8395 ADVERTISED_TP);
8396 }
8397 break;
8398
8399 case SPEED_1000:
f1410647
ET
8400 if (cmd->duplex != DUPLEX_FULL) {
8401 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 8402 return -EINVAL;
f1410647 8403 }
a2fbb9ea 8404
34f80b04 8405 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 8406 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 8407 return -EINVAL;
f1410647 8408 }
a2fbb9ea
ET
8409
8410 advertising = (ADVERTISED_1000baseT_Full |
8411 ADVERTISED_TP);
8412 break;
8413
8414 case SPEED_2500:
f1410647
ET
8415 if (cmd->duplex != DUPLEX_FULL) {
8416 DP(NETIF_MSG_LINK,
8417 "2.5G half not supported\n");
a2fbb9ea 8418 return -EINVAL;
f1410647 8419 }
a2fbb9ea 8420
34f80b04 8421 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
f1410647
ET
8422 DP(NETIF_MSG_LINK,
8423 "2.5G full not supported\n");
a2fbb9ea 8424 return -EINVAL;
f1410647 8425 }
a2fbb9ea 8426
f1410647 8427 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
8428 ADVERTISED_TP);
8429 break;
8430
8431 case SPEED_10000:
f1410647
ET
8432 if (cmd->duplex != DUPLEX_FULL) {
8433 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 8434 return -EINVAL;
f1410647 8435 }
a2fbb9ea 8436
34f80b04 8437 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 8438 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 8439 return -EINVAL;
f1410647 8440 }
a2fbb9ea
ET
8441
8442 advertising = (ADVERTISED_10000baseT_Full |
8443 ADVERTISED_FIBRE);
8444 break;
8445
8446 default:
f1410647 8447 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
8448 return -EINVAL;
8449 }
8450
c18487ee
YR
8451 bp->link_params.req_line_speed = cmd->speed;
8452 bp->link_params.req_duplex = cmd->duplex;
34f80b04 8453 bp->port.advertising = advertising;
a2fbb9ea
ET
8454 }
8455
c18487ee 8456 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 8457 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 8458 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 8459 bp->port.advertising);
a2fbb9ea 8460
34f80b04 8461 if (netif_running(dev)) {
bb2a0f7a 8462 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8463 bnx2x_link_set(bp);
8464 }
a2fbb9ea
ET
8465
8466 return 0;
8467}
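/* Illustrative sketch (not part of the driver): how a forced-speed
 * request reaches bnx2x_set_settings() from user space.  This assumes
 * the legacy ETHTOOL_SSET ioctl ABI of this kernel generation; the
 * helper name force_1g_full() is hypothetical. */
#if 0	/* user-space example, kept out of the kernel build */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int force_1g_full(int sock, const char *ifname)
{
	struct ifreq ifr;
	struct ethtool_cmd ecmd;

	memset(&ecmd, 0, sizeof(ecmd));
	ecmd.cmd = ETHTOOL_SSET;
	ecmd.speed = SPEED_1000;	/* hits the SPEED_1000 case above */
	ecmd.duplex = DUPLEX_FULL;
	ecmd.autoneg = AUTONEG_DISABLE;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&ecmd;
	/* returns -1/errno on failure, e.g. EINVAL for unsupported modes */
	return ioctl(sock, SIOCETHTOOL, &ifr);
}
#endif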
8468
c18487ee
YR
8469#define PHY_FW_VER_LEN 10
8470
a2fbb9ea
ET
8471static void bnx2x_get_drvinfo(struct net_device *dev,
8472 struct ethtool_drvinfo *info)
8473{
8474 struct bnx2x *bp = netdev_priv(dev);
f0e53a84 8475 u8 phy_fw_ver[PHY_FW_VER_LEN];
a2fbb9ea
ET
8476
8477 strcpy(info->driver, DRV_MODULE_NAME);
8478 strcpy(info->version, DRV_MODULE_VERSION);
c18487ee
YR
8479
8480 phy_fw_ver[0] = '\0';
34f80b04 8481 if (bp->port.pmf) {
4a37fb66 8482 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
8483 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8484 (bp->state != BNX2X_STATE_CLOSED),
8485 phy_fw_ver, PHY_FW_VER_LEN);
4a37fb66 8486 bnx2x_release_phy_lock(bp);
34f80b04 8487 }
c18487ee 8488
f0e53a84
EG
8489 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8490 (bp->common.bc_ver & 0xff0000) >> 16,
8491 (bp->common.bc_ver & 0xff00) >> 8,
8492 (bp->common.bc_ver & 0xff),
8493 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
a2fbb9ea
ET
8494 strcpy(info->bus_info, pci_name(bp->pdev));
8495 info->n_stats = BNX2X_NUM_STATS;
8496 info->testinfo_len = BNX2X_NUM_TESTS;
34f80b04 8497 info->eedump_len = bp->common.flash_size;
a2fbb9ea
ET
8498 info->regdump_len = 0;
8499}
8500
8501static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8502{
8503 struct bnx2x *bp = netdev_priv(dev);
8504
8505 if (bp->flags & NO_WOL_FLAG) {
8506 wol->supported = 0;
8507 wol->wolopts = 0;
8508 } else {
8509 wol->supported = WAKE_MAGIC;
8510 if (bp->wol)
8511 wol->wolopts = WAKE_MAGIC;
8512 else
8513 wol->wolopts = 0;
8514 }
8515 memset(&wol->sopass, 0, sizeof(wol->sopass));
8516}
8517
8518static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8519{
8520 struct bnx2x *bp = netdev_priv(dev);
8521
8522 if (wol->wolopts & ~WAKE_MAGIC)
8523 return -EINVAL;
8524
8525 if (wol->wolopts & WAKE_MAGIC) {
8526 if (bp->flags & NO_WOL_FLAG)
8527 return -EINVAL;
8528
8529 bp->wol = 1;
34f80b04 8530 } else
a2fbb9ea 8531 bp->wol = 0;
34f80b04 8532
a2fbb9ea
ET
8533 return 0;
8534}
8535
8536static u32 bnx2x_get_msglevel(struct net_device *dev)
8537{
8538 struct bnx2x *bp = netdev_priv(dev);
8539
8540 return bp->msglevel;
8541}
8542
8543static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8544{
8545 struct bnx2x *bp = netdev_priv(dev);
8546
8547 if (capable(CAP_NET_ADMIN))
8548 bp->msglevel = level;
8549}
8550
8551static int bnx2x_nway_reset(struct net_device *dev)
8552{
8553 struct bnx2x *bp = netdev_priv(dev);
8554
34f80b04
EG
8555 if (!bp->port.pmf)
8556 return 0;
a2fbb9ea 8557
34f80b04 8558 if (netif_running(dev)) {
bb2a0f7a 8559 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8560 bnx2x_link_set(bp);
8561 }
a2fbb9ea
ET
8562
8563 return 0;
8564}
8565
8566static int bnx2x_get_eeprom_len(struct net_device *dev)
8567{
8568 struct bnx2x *bp = netdev_priv(dev);
8569
34f80b04 8570 return bp->common.flash_size;
a2fbb9ea
ET
8571}
8572
8573static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8574{
34f80b04 8575 int port = BP_PORT(bp);
a2fbb9ea
ET
8576 int count, i;
8577 u32 val = 0;
8578
8579 /* adjust timeout for emulation/FPGA */
8580 count = NVRAM_TIMEOUT_COUNT;
8581 if (CHIP_REV_IS_SLOW(bp))
8582 count *= 100;
8583
8584 /* request access to nvram interface */
8585 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8586 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8587
8588 for (i = 0; i < count*10; i++) {
8589 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8590 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8591 break;
8592
8593 udelay(5);
8594 }
8595
8596 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 8597 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
a2fbb9ea
ET
8598 return -EBUSY;
8599 }
8600
8601 return 0;
8602}
8603
8604static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8605{
34f80b04 8606 int port = BP_PORT(bp);
a2fbb9ea
ET
8607 int count, i;
8608 u32 val = 0;
8609
8610 /* adjust timeout for emulation/FPGA */
8611 count = NVRAM_TIMEOUT_COUNT;
8612 if (CHIP_REV_IS_SLOW(bp))
8613 count *= 100;
8614
8615 /* relinquish nvram interface */
8616 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8617 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8618
8619 for (i = 0; i < count*10; i++) {
8620 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8621 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8622 break;
8623
8624 udelay(5);
8625 }
8626
8627 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 8628 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
a2fbb9ea
ET
8629 return -EBUSY;
8630 }
8631
8632 return 0;
8633}
8634
8635static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8636{
8637 u32 val;
8638
8639 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8640
8641 /* enable both bits, even on read */
8642 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8643 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8644 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8645}
8646
8647static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8648{
8649 u32 val;
8650
8651 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8652
8653 /* disable both bits, even after read */
8654 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8655 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8656 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8657}
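/* Sketch (illustrative, not in the original source): every NVRAM user
 * below brackets its dword operations with the four helpers above, in
 * this fixed order.  bnx2x_nvram_example() is a hypothetical name. */
#if 0
static int bnx2x_nvram_example(struct bnx2x *bp)
{
	int rc;

	rc = bnx2x_acquire_nvram_lock(bp);	/* per-port SW arbitration */
	if (rc)
		return rc;
	bnx2x_enable_nvram_access(bp);

	/* ... MCPR_NVM_COMMAND dword reads/writes go here ... */

	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);
	return 0;
}
#endif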
8658
4781bfad 8659static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
a2fbb9ea
ET
8660 u32 cmd_flags)
8661{
f1410647 8662 int count, i, rc;
a2fbb9ea
ET
8663 u32 val;
8664
8665 /* build the command word */
8666 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8667
8668 /* need to clear DONE bit separately */
8669 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8670
8671 /* address of the NVRAM to read from */
8672 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8673 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8674
8675 /* issue a read command */
8676 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8677
8678 /* adjust timeout for emulation/FPGA */
8679 count = NVRAM_TIMEOUT_COUNT;
8680 if (CHIP_REV_IS_SLOW(bp))
8681 count *= 100;
8682
8683 /* wait for completion */
8684 *ret_val = 0;
8685 rc = -EBUSY;
8686 for (i = 0; i < count; i++) {
8687 udelay(5);
8688 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8689
8690 if (val & MCPR_NVM_COMMAND_DONE) {
8691 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
a2fbb9ea
ET
8692 /* we read the nvram dword in CPU order,
8693 * but ethtool expects an array of bytes;
8694 * converting to big-endian does the work */
4781bfad 8695 *ret_val = cpu_to_be32(val);
a2fbb9ea
ET
8696 rc = 0;
8697 break;
8698 }
8699 }
8700
8701 return rc;
8702}
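/* Worked example (illustrative): suppose the four NVRAM bytes at some
 * offset are 4e 65 74 58 and the controller presents byte 0 in the most
 * significant byte of MCP_REG_MCPR_NVM_READ (the assumption the
 * conversion above relies on), i.e. val == 0x4e657458 in CPU order.
 * A raw memcpy() on a little-endian host would store 58 74 65 4e into
 * the ethtool buffer; cpu_to_be32() stores the MSB first, restoring the
 * on-flash order 4e 65 74 58.  On big-endian hosts the conversion is a
 * no-op and the order is already correct. */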
8703
8704static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8705 int buf_size)
8706{
8707 int rc;
8708 u32 cmd_flags;
4781bfad 8709 __be32 val;
a2fbb9ea
ET
8710
8711 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8712 DP(BNX2X_MSG_NVM,
c14423fe 8713 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
8714 offset, buf_size);
8715 return -EINVAL;
8716 }
8717
34f80b04
EG
8718 if (offset + buf_size > bp->common.flash_size) {
8719 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8720 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8721 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8722 return -EINVAL;
8723 }
8724
8725 /* request access to nvram interface */
8726 rc = bnx2x_acquire_nvram_lock(bp);
8727 if (rc)
8728 return rc;
8729
8730 /* enable access to nvram interface */
8731 bnx2x_enable_nvram_access(bp);
8732
8733 /* read the first word(s) */
8734 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8735 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8736 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8737 memcpy(ret_buf, &val, 4);
8738
8739 /* advance to the next dword */
8740 offset += sizeof(u32);
8741 ret_buf += sizeof(u32);
8742 buf_size -= sizeof(u32);
8743 cmd_flags = 0;
8744 }
8745
8746 if (rc == 0) {
8747 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8748 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8749 memcpy(ret_buf, &val, 4);
8750 }
8751
8752 /* disable access to nvram interface */
8753 bnx2x_disable_nvram_access(bp);
8754 bnx2x_release_nvram_lock(bp);
8755
8756 return rc;
8757}
8758
8759static int bnx2x_get_eeprom(struct net_device *dev,
8760 struct ethtool_eeprom *eeprom, u8 *eebuf)
8761{
8762 struct bnx2x *bp = netdev_priv(dev);
8763 int rc;
8764
2add3acb
EG
8765 if (!netif_running(dev))
8766 return -EAGAIN;
8767
34f80b04 8768 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
8769 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8770 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8771 eeprom->len, eeprom->len);
8772
8773 /* parameters already validated in ethtool_get_eeprom */
8774
8775 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8776
8777 return rc;
8778}
8779
8780static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8781 u32 cmd_flags)
8782{
f1410647 8783 int count, i, rc;
a2fbb9ea
ET
8784
8785 /* build the command word */
8786 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8787
8788 /* need to clear DONE bit separately */
8789 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8790
8791 /* write the data */
8792 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8793
8794 /* address of the NVRAM to write to */
8795 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8796 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8797
8798 /* issue the write command */
8799 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8800
8801 /* adjust timeout for emulation/FPGA */
8802 count = NVRAM_TIMEOUT_COUNT;
8803 if (CHIP_REV_IS_SLOW(bp))
8804 count *= 100;
8805
8806 /* wait for completion */
8807 rc = -EBUSY;
8808 for (i = 0; i < count; i++) {
8809 udelay(5);
8810 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8811 if (val & MCPR_NVM_COMMAND_DONE) {
8812 rc = 0;
8813 break;
8814 }
8815 }
8816
8817 return rc;
8818}
8819
f1410647 8820#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
a2fbb9ea
ET
8821
8822static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8823 int buf_size)
8824{
8825 int rc;
8826 u32 cmd_flags;
8827 u32 align_offset;
4781bfad 8828 __be32 val;
a2fbb9ea 8829
34f80b04
EG
8830 if (offset + buf_size > bp->common.flash_size) {
8831 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8832 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8833 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8834 return -EINVAL;
8835 }
8836
8837 /* request access to nvram interface */
8838 rc = bnx2x_acquire_nvram_lock(bp);
8839 if (rc)
8840 return rc;
8841
8842 /* enable access to nvram interface */
8843 bnx2x_enable_nvram_access(bp);
8844
8845 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8846 align_offset = (offset & ~0x03);
8847 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8848
8849 if (rc == 0) {
8850 val &= ~(0xff << BYTE_OFFSET(offset));
8851 val |= (*data_buf << BYTE_OFFSET(offset));
8852
8853 /* nvram data is returned as an array of bytes;
8854 * convert it back to cpu order */
8855 val = be32_to_cpu(val);
8856
a2fbb9ea
ET
8857 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8858 cmd_flags);
8859 }
8860
8861 /* disable access to nvram interface */
8862 bnx2x_disable_nvram_access(bp);
8863 bnx2x_release_nvram_lock(bp);
8864
8865 return rc;
8866}
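/* Worked example (illustrative): patching the single byte at offset
 * 0x102 with *data_buf == 0xab.  align_offset == 0x100 and
 * BYTE_OFFSET(0x102) == 16, so after the dword read:
 *
 *	val &= ~(0xff << 16);	clear the old byte
 *	val |= (0xab << 16);	splice in the new one
 *
 * and the whole dword is written back, since the NVRAM interface only
 * transfers aligned 32-bit words. */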
8867
8868static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8869 int buf_size)
8870{
8871 int rc;
8872 u32 cmd_flags;
8873 u32 val;
8874 u32 written_so_far;
8875
34f80b04 8876 if (buf_size == 1) /* ethtool */
a2fbb9ea 8877 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
a2fbb9ea
ET
8878
8879 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8880 DP(BNX2X_MSG_NVM,
c14423fe 8881 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
8882 offset, buf_size);
8883 return -EINVAL;
8884 }
8885
34f80b04
EG
8886 if (offset + buf_size > bp->common.flash_size) {
8887 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8888 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8889 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8890 return -EINVAL;
8891 }
8892
8893 /* request access to nvram interface */
8894 rc = bnx2x_acquire_nvram_lock(bp);
8895 if (rc)
8896 return rc;
8897
8898 /* enable access to nvram interface */
8899 bnx2x_enable_nvram_access(bp);
8900
8901 written_so_far = 0;
8902 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8903 while ((written_so_far < buf_size) && (rc == 0)) {
8904 if (written_so_far == (buf_size - sizeof(u32)))
8905 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8906 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8907 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8908 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8909 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8910
8911 memcpy(&val, data_buf, 4);
a2fbb9ea
ET
8912
8913 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8914
8915 /* advance to the next dword */
8916 offset += sizeof(u32);
8917 data_buf += sizeof(u32);
8918 written_so_far += sizeof(u32);
8919 cmd_flags = 0;
8920 }
8921
8922 /* disable access to nvram interface */
8923 bnx2x_disable_nvram_access(bp);
8924 bnx2x_release_nvram_lock(bp);
8925
8926 return rc;
8927}
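/* Example (illustrative, assuming an NVRAM_PAGE_SIZE of 256 bytes): a
 * 16-byte write at offset 0xf8 crosses a page boundary.  The dword at
 * 0xf8 carries FIRST (loop entry), the dword at 0xfc carries LAST
 * because (offset + 4) hits the page boundary, the dword at 0x100
 * starts the next page with FIRST again, and the final dword at 0x104
 * carries LAST because it ends the buffer. */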
8928
8929static int bnx2x_set_eeprom(struct net_device *dev,
8930 struct ethtool_eeprom *eeprom, u8 *eebuf)
8931{
8932 struct bnx2x *bp = netdev_priv(dev);
8933 int rc;
8934
9f4c9583
EG
8935 if (!netif_running(dev))
8936 return -EAGAIN;
8937
34f80b04 8938 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
8939 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8940 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8941 eeprom->len, eeprom->len);
8942
8943 /* parameters already validated in ethtool_set_eeprom */
8944
c18487ee 8945 /* If the magic number is PHY (0x00504859, ASCII "PHY"), upgrade the PHY FW */
34f80b04 8946 if (eeprom->magic == 0x00504859)
8947 if (bp->port.pmf) {
8948
4a37fb66 8949 bnx2x_acquire_phy_lock(bp);
34f80b04 8950 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8951 bp->link_params.ext_phy_config,
8952 (bp->state != BNX2X_STATE_CLOSED),
8953 eebuf, eeprom->len);
bb2a0f7a 8954 if ((bp->state == BNX2X_STATE_OPEN) ||
8955 (bp->state == BNX2X_STATE_DISABLED)) {
34f80b04 8956 rc |= bnx2x_link_reset(&bp->link_params,
589abe3a 8957 &bp->link_vars, 1);
34f80b04 8958 rc |= bnx2x_phy_init(&bp->link_params,
8959 &bp->link_vars);
bb2a0f7a 8960 }
4a37fb66 8961 bnx2x_release_phy_lock(bp);
34f80b04 8962
8963 } else /* Only the PMF can access the PHY */
8964 return -EINVAL;
8965 else
c18487ee 8966 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
a2fbb9ea
ET
8967
8968 return rc;
8969}
8970
8971static int bnx2x_get_coalesce(struct net_device *dev,
8972 struct ethtool_coalesce *coal)
8973{
8974 struct bnx2x *bp = netdev_priv(dev);
8975
8976 memset(coal, 0, sizeof(struct ethtool_coalesce));
8977
8978 coal->rx_coalesce_usecs = bp->rx_ticks;
8979 coal->tx_coalesce_usecs = bp->tx_ticks;
a2fbb9ea
ET
8980
8981 return 0;
8982}
8983
8984static int bnx2x_set_coalesce(struct net_device *dev,
8985 struct ethtool_coalesce *coal)
8986{
8987 struct bnx2x *bp = netdev_priv(dev);
8988
8989 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8990 if (bp->rx_ticks > 3000)
8991 bp->rx_ticks = 3000;
8992
8993 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8994 if (bp->tx_ticks > 0x3000)
8995 bp->tx_ticks = 0x3000;
8996
34f80b04 8997 if (netif_running(dev))
a2fbb9ea
ET
8998 bnx2x_update_coalesce(bp);
8999
9000 return 0;
9001}
9002
9003static void bnx2x_get_ringparam(struct net_device *dev,
9004 struct ethtool_ringparam *ering)
9005{
9006 struct bnx2x *bp = netdev_priv(dev);
9007
9008 ering->rx_max_pending = MAX_RX_AVAIL;
9009 ering->rx_mini_max_pending = 0;
9010 ering->rx_jumbo_max_pending = 0;
9011
9012 ering->rx_pending = bp->rx_ring_size;
9013 ering->rx_mini_pending = 0;
9014 ering->rx_jumbo_pending = 0;
9015
9016 ering->tx_max_pending = MAX_TX_AVAIL;
9017 ering->tx_pending = bp->tx_ring_size;
9018}
9019
9020static int bnx2x_set_ringparam(struct net_device *dev,
9021 struct ethtool_ringparam *ering)
9022{
9023 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9024 int rc = 0;
a2fbb9ea
ET
9025
9026 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9027 (ering->tx_pending > MAX_TX_AVAIL) ||
9028 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9029 return -EINVAL;
9030
9031 bp->rx_ring_size = ering->rx_pending;
9032 bp->tx_ring_size = ering->tx_pending;
9033
34f80b04
EG
9034 if (netif_running(dev)) {
9035 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9036 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea
ET
9037 }
9038
34f80b04 9039 return rc;
a2fbb9ea
ET
9040}
9041
9042static void bnx2x_get_pauseparam(struct net_device *dev,
9043 struct ethtool_pauseparam *epause)
9044{
9045 struct bnx2x *bp = netdev_priv(dev);
9046
356e2385 9047 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9048 BNX2X_FLOW_CTRL_AUTO) &&
c18487ee 9049 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9050
c0700f90
DM
9051 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9052 BNX2X_FLOW_CTRL_RX);
9053 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9054 BNX2X_FLOW_CTRL_TX);
a2fbb9ea
ET
9055
9056 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9057 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9058 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9059}
9060
9061static int bnx2x_set_pauseparam(struct net_device *dev,
9062 struct ethtool_pauseparam *epause)
9063{
9064 struct bnx2x *bp = netdev_priv(dev);
9065
34f80b04
EG
9066 if (IS_E1HMF(bp))
9067 return 0;
9068
a2fbb9ea
ET
9069 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9070 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9071 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9072
c0700f90 9073 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
a2fbb9ea 9074
f1410647 9075 if (epause->rx_pause)
c0700f90 9076 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
c18487ee 9077
f1410647 9078 if (epause->tx_pause)
c0700f90 9079 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
c18487ee 9080
c0700f90
DM
9081 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9082 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 9083
c18487ee 9084 if (epause->autoneg) {
34f80b04 9085 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
3196a88a 9086 DP(NETIF_MSG_LINK, "autoneg not supported\n");
c18487ee
YR
9087 return -EINVAL;
9088 }
a2fbb9ea 9089
c18487ee 9090 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
c0700f90 9091 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
c18487ee 9092 }
a2fbb9ea 9093
c18487ee
YR
9094 DP(NETIF_MSG_LINK,
9095 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
34f80b04
EG
9096
9097 if (netif_running(dev)) {
bb2a0f7a 9098 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
9099 bnx2x_link_set(bp);
9100 }
a2fbb9ea
ET
9101
9102 return 0;
9103}
9104
df0f2343
VZ
9105static int bnx2x_set_flags(struct net_device *dev, u32 data)
9106{
9107 struct bnx2x *bp = netdev_priv(dev);
9108 int changed = 0;
9109 int rc = 0;
9110
9111 /* TPA requires Rx CSUM offloading */
9112 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9113 if (!(dev->features & NETIF_F_LRO)) {
9114 dev->features |= NETIF_F_LRO;
9115 bp->flags |= TPA_ENABLE_FLAG;
9116 changed = 1;
9117 }
9118
9119 } else if (dev->features & NETIF_F_LRO) {
9120 dev->features &= ~NETIF_F_LRO;
9121 bp->flags &= ~TPA_ENABLE_FLAG;
9122 changed = 1;
9123 }
9124
9125 if (changed && netif_running(dev)) {
9126 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9127 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9128 }
9129
9130 return rc;
9131}
9132
a2fbb9ea
ET
9133static u32 bnx2x_get_rx_csum(struct net_device *dev)
9134{
9135 struct bnx2x *bp = netdev_priv(dev);
9136
9137 return bp->rx_csum;
9138}
9139
9140static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9141{
9142 struct bnx2x *bp = netdev_priv(dev);
df0f2343 9143 int rc = 0;
a2fbb9ea
ET
9144
9145 bp->rx_csum = data;
df0f2343
VZ
9146
9147 /* Disable TPA when Rx CSUM is disabled; otherwise all
9148 TPA'ed packets would be discarded due to a wrong TCP CSUM */
9149 if (!data) {
9150 u32 flags = ethtool_op_get_flags(dev);
9151
9152 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9153 }
9154
9155 return rc;
a2fbb9ea
ET
9156}
9157
9158static int bnx2x_set_tso(struct net_device *dev, u32 data)
9159{
755735eb 9160 if (data) {
a2fbb9ea 9161 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
9162 dev->features |= NETIF_F_TSO6;
9163 } else {
a2fbb9ea 9164 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
9165 dev->features &= ~NETIF_F_TSO6;
9166 }
9167
a2fbb9ea
ET
9168 return 0;
9169}
9170
f3c87cdd 9171static const struct {
a2fbb9ea
ET
9172 char string[ETH_GSTRING_LEN];
9173} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
f3c87cdd
YG
9174 { "register_test (offline)" },
9175 { "memory_test (offline)" },
9176 { "loopback_test (offline)" },
9177 { "nvram_test (online)" },
9178 { "interrupt_test (online)" },
9179 { "link_test (online)" },
d3d4f495 9180 { "idle check (online)" }
a2fbb9ea
ET
9181};
9182
9183static int bnx2x_self_test_count(struct net_device *dev)
9184{
9185 return BNX2X_NUM_TESTS;
9186}
9187
f3c87cdd
YG
9188static int bnx2x_test_registers(struct bnx2x *bp)
9189{
9190 int idx, i, rc = -ENODEV;
9191 u32 wr_val = 0;
9dabc424 9192 int port = BP_PORT(bp);
f3c87cdd
YG
9193 static const struct {
9194 u32 offset0;
9195 u32 offset1;
9196 u32 mask;
9197 } reg_tbl[] = {
9198/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
9199 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
9200 { HC_REG_AGG_INT_0, 4, 0x000003ff },
9201 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
9202 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
9203 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
9204 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
9205 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9206 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
9207 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9208/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
9209 { QM_REG_CONNNUM_0, 4, 0x000fffff },
9210 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
9211 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
9212 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
9213 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9214 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
9215 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
9216 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
9217 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
9218/* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
9219 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
9220 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
9221 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
9222 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
9223 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
9224 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
9225 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
9226 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
9227 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
9228/* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
9229 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
9230 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
9231 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
9232 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9233 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
9234 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9235 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
9236
9237 { 0xffffffff, 0, 0x00000000 }
9238 };
9239
9240 if (!netif_running(bp->dev))
9241 return rc;
9242
9243 /* Repeat the test twice:
9244 first writing 0x00000000, then writing 0xffffffff */
9245 for (idx = 0; idx < 2; idx++) {
9246
9247 switch (idx) {
9248 case 0:
9249 wr_val = 0;
9250 break;
9251 case 1:
9252 wr_val = 0xffffffff;
9253 break;
9254 }
9255
9256 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9257 u32 offset, mask, save_val, val;
f3c87cdd
YG
9258
9259 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9260 mask = reg_tbl[i].mask;
9261
9262 save_val = REG_RD(bp, offset);
9263
9264 REG_WR(bp, offset, wr_val);
9265 val = REG_RD(bp, offset);
9266
9267 /* Restore the original register's value */
9268 REG_WR(bp, offset, save_val);
9269
9270 /* verify that the value read back is as expected */
9271 if ((val & mask) != (wr_val & mask))
9272 goto test_reg_exit;
9273 }
9274 }
9275
9276 rc = 0;
9277
9278test_reg_exit:
9279 return rc;
9280}
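/* Note (illustrative): reg_tbl[].offset1 is the per-port stride, so for
 * port 1 the PBF_REG_MAC_IF0_ENABLE entry exercises
 * PBF_REG_MAC_IF0_ENABLE + 1*4, the port-1 instance of the register.
 * Each location is written with 0x00000000 and then 0xffffffff, and
 * only the bits in .mask are compared on read-back, since the remaining
 * bits are read-only or reserved. */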
9281
9282static int bnx2x_test_memory(struct bnx2x *bp)
9283{
9284 int i, j, rc = -ENODEV;
9285 u32 val;
9286 static const struct {
9287 u32 offset;
9288 int size;
9289 } mem_tbl[] = {
9290 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
9291 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9292 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
9293 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
9294 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
9295 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
9296 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
9297
9298 { 0xffffffff, 0 }
9299 };
9300 static const struct {
9301 char *name;
9302 u32 offset;
9dabc424
YG
9303 u32 e1_mask;
9304 u32 e1h_mask;
f3c87cdd 9305 } prty_tbl[] = {
9dabc424
YG
9306 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
9307 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
9308 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
9309 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
9310 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
9311 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
9312
9313 { NULL, 0xffffffff, 0, 0 }
f3c87cdd
YG
9314 };
9315
9316 if (!netif_running(bp->dev))
9317 return rc;
9318
9319 /* Go through all the memories */
9320 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9321 for (j = 0; j < mem_tbl[i].size; j++)
9322 REG_RD(bp, mem_tbl[i].offset + j*4);
9323
9324 /* Check the parity status */
9325 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9326 val = REG_RD(bp, prty_tbl[i].offset);
9dabc424
YG
9327 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9328 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
f3c87cdd
YG
9329 DP(NETIF_MSG_HW,
9330 "%s is 0x%x\n", prty_tbl[i].name, val);
9331 goto test_mem_exit;
9332 }
9333 }
9334
9335 rc = 0;
9336
9337test_mem_exit:
9338 return rc;
9339}
9340
f3c87cdd
YG
9341static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9342{
9343 int cnt = 1000;
9344
9345 if (link_up)
9346 while (bnx2x_link_test(bp) && cnt--)
9347 msleep(10);
9348}
9349
9350static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9351{
9352 unsigned int pkt_size, num_pkts, i;
9353 struct sk_buff *skb;
9354 unsigned char *packet;
9355 struct bnx2x_fastpath *fp = &bp->fp[0];
9356 u16 tx_start_idx, tx_idx;
9357 u16 rx_start_idx, rx_idx;
9358 u16 pkt_prod;
9359 struct sw_tx_bd *tx_buf;
9360 struct eth_tx_bd *tx_bd;
9361 dma_addr_t mapping;
9362 union eth_rx_cqe *cqe;
9363 u8 cqe_fp_flags;
9364 struct sw_rx_bd *rx_buf;
9365 u16 len;
9366 int rc = -ENODEV;
9367
b5bf9068
EG
9368 /* check the loopback mode */
9369 switch (loopback_mode) {
9370 case BNX2X_PHY_LOOPBACK:
9371 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
9372 return -EINVAL;
9373 break;
9374 case BNX2X_MAC_LOOPBACK:
f3c87cdd 9375 bp->link_params.loopback_mode = LOOPBACK_BMAC;
f3c87cdd 9376 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068
EG
9377 break;
9378 default:
f3c87cdd 9379 return -EINVAL;
b5bf9068 9380 }
f3c87cdd 9381
b5bf9068
EG
9382 /* prepare the loopback packet */
9383 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9384 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
f3c87cdd
YG
9385 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9386 if (!skb) {
9387 rc = -ENOMEM;
9388 goto test_loopback_exit;
9389 }
9390 packet = skb_put(skb, pkt_size);
9391 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9392 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
9393 for (i = ETH_HLEN; i < pkt_size; i++)
9394 packet[i] = (unsigned char) (i & 0xff);
9395
b5bf9068 9396 /* send the loopback packet */
f3c87cdd
YG
9397 num_pkts = 0;
9398 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
9399 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
9400
9401 pkt_prod = fp->tx_pkt_prod++;
9402 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9403 tx_buf->first_bd = fp->tx_bd_prod;
9404 tx_buf->skb = skb;
9405
9406 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
9407 mapping = pci_map_single(bp->pdev, skb->data,
9408 skb_headlen(skb), PCI_DMA_TODEVICE);
9409 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9410 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9411 tx_bd->nbd = cpu_to_le16(1);
9412 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9413 tx_bd->vlan = cpu_to_le16(pkt_prod);
9414 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
9415 ETH_TX_BD_FLAGS_END_BD);
9416 tx_bd->general_data = ((UNICAST_ADDRESS <<
9417 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9418
58f4c4cf 9419 wmb();
9420
4781bfad 9421 le16_add_cpu(&fp->hw_tx_prods->bds_prod, 1);
f3c87cdd 9422 mb(); /* FW restriction: must not reorder writing nbd and packets */
4781bfad 9423 le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
0626b899 9424 DOORBELL(bp, fp->index, 0);
f3c87cdd
YG
9425
9426 mmiowb();
9427
9428 num_pkts++;
9429 fp->tx_bd_prod++;
9430 bp->dev->trans_start = jiffies;
9431
9432 udelay(100);
9433
9434 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
9435 if (tx_idx != tx_start_idx + num_pkts)
9436 goto test_loopback_exit;
9437
9438 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
9439 if (rx_idx != rx_start_idx + num_pkts)
9440 goto test_loopback_exit;
9441
9442 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
9443 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9444 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9445 goto test_loopback_rx_exit;
9446
9447 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9448 if (len != pkt_size)
9449 goto test_loopback_rx_exit;
9450
9451 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
9452 skb = rx_buf->skb;
9453 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9454 for (i = ETH_HLEN; i < pkt_size; i++)
9455 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9456 goto test_loopback_rx_exit;
9457
9458 rc = 0;
9459
9460test_loopback_rx_exit:
f3c87cdd
YG
9461
9462 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
9463 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
9464 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
9465 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
9466
9467 /* Update producers */
9468 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
9469 fp->rx_sge_prod);
f3c87cdd
YG
9470
9471test_loopback_exit:
9472 bp->link_params.loopback_mode = LOOPBACK_NONE;
9473
9474 return rc;
9475}
9476
9477static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9478{
b5bf9068 9479 int rc = 0, res;
f3c87cdd
YG
9480
9481 if (!netif_running(bp->dev))
9482 return BNX2X_LOOPBACK_FAILED;
9483
f8ef6e44 9484 bnx2x_netif_stop(bp, 1);
3910c8ae 9485 bnx2x_acquire_phy_lock(bp);
f3c87cdd 9486
b5bf9068
EG
9487 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
9488 if (res) {
9489 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
9490 rc |= BNX2X_PHY_LOOPBACK_FAILED;
f3c87cdd
YG
9491 }
9492
b5bf9068
EG
9493 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
9494 if (res) {
9495 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
9496 rc |= BNX2X_MAC_LOOPBACK_FAILED;
f3c87cdd
YG
9497 }
9498
3910c8ae 9499 bnx2x_release_phy_lock(bp);
f3c87cdd
YG
9500 bnx2x_netif_start(bp);
9501
9502 return rc;
9503}
9504
9505#define CRC32_RESIDUAL 0xdebb20e3
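/* Why a fixed residual works (note, not in the original source): each
 * nvram_tbl[] region below stores its little-endian CRC-32 in its last
 * four bytes.  Running ether_crc_le() over the data *including* that
 * stored CRC yields the constant CRC-32 residue 0xdebb20e3 for any
 * intact region, so one compare validates a region without extracting
 * and recomputing the checksum. */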
9506
9507static int bnx2x_test_nvram(struct bnx2x *bp)
9508{
9509 static const struct {
9510 int offset;
9511 int size;
9512 } nvram_tbl[] = {
9513 { 0, 0x14 }, /* bootstrap */
9514 { 0x14, 0xec }, /* dir */
9515 { 0x100, 0x350 }, /* manuf_info */
9516 { 0x450, 0xf0 }, /* feature_info */
9517 { 0x640, 0x64 }, /* upgrade_key_info */
9518 { 0x6a4, 0x64 },
9519 { 0x708, 0x70 }, /* manuf_key_info */
9520 { 0x778, 0x70 },
9521 { 0, 0 }
9522 };
4781bfad 9523 __be32 buf[0x350 / 4];
f3c87cdd
YG
9524 u8 *data = (u8 *)buf;
9525 int i, rc;
9526 u32 magic, csum;
9527
9528 rc = bnx2x_nvram_read(bp, 0, data, 4);
9529 if (rc) {
f5372251 9530 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
f3c87cdd
YG
9531 goto test_nvram_exit;
9532 }
9533
9534 magic = be32_to_cpu(buf[0]);
9535 if (magic != 0x669955aa) {
9536 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9537 rc = -ENODEV;
9538 goto test_nvram_exit;
9539 }
9540
9541 for (i = 0; nvram_tbl[i].size; i++) {
9542
9543 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9544 nvram_tbl[i].size);
9545 if (rc) {
9546 DP(NETIF_MSG_PROBE,
f5372251 9547 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
f3c87cdd
YG
9548 goto test_nvram_exit;
9549 }
9550
9551 csum = ether_crc_le(nvram_tbl[i].size, data);
9552 if (csum != CRC32_RESIDUAL) {
9553 DP(NETIF_MSG_PROBE,
9554 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9555 rc = -ENODEV;
9556 goto test_nvram_exit;
9557 }
9558 }
9559
9560test_nvram_exit:
9561 return rc;
9562}
9563
9564static int bnx2x_test_intr(struct bnx2x *bp)
9565{
9566 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9567 int i, rc;
9568
9569 if (!netif_running(bp->dev))
9570 return -ENODEV;
9571
8d9c5f34 9572 config->hdr.length = 0;
af246401
EG
9573 if (CHIP_IS_E1(bp))
9574 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9575 else
9576 config->hdr.offset = BP_FUNC(bp);
0626b899 9577 config->hdr.client_id = bp->fp->cl_id;
f3c87cdd
YG
9578 config->hdr.reserved1 = 0;
9579
9580 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9581 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9582 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9583 if (rc == 0) {
9584 bp->set_mac_pending++;
9585 for (i = 0; i < 10; i++) {
9586 if (!bp->set_mac_pending)
9587 break;
9588 msleep_interruptible(10);
9589 }
9590 if (i == 10)
9591 rc = -ENODEV;
9592 }
9593
9594 return rc;
9595}
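/* The interrupt self-test above works by posting a harmless SET_MAC
 * ramrod (hdr.length == 0, so no MAC entries are actually programmed)
 * and polling set_mac_pending for up to ~100 ms (10 x 10 ms).  If the
 * completion never clears the flag, slowpath interrupt delivery is
 * broken and -ENODEV is returned. */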
9596
a2fbb9ea
ET
9597static void bnx2x_self_test(struct net_device *dev,
9598 struct ethtool_test *etest, u64 *buf)
9599{
9600 struct bnx2x *bp = netdev_priv(dev);
a2fbb9ea
ET
9601
9602 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9603
f3c87cdd 9604 if (!netif_running(dev))
a2fbb9ea 9605 return;
a2fbb9ea 9606
33471629 9607 /* offline tests are not supported in MF mode */
f3c87cdd
YG
9608 if (IS_E1HMF(bp))
9609 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9610
9611 if (etest->flags & ETH_TEST_FL_OFFLINE) {
9612 u8 link_up;
9613
9614 link_up = bp->link_vars.link_up;
9615 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9616 bnx2x_nic_load(bp, LOAD_DIAG);
9617 /* wait until link state is restored */
9618 bnx2x_wait_for_link(bp, link_up);
9619
9620 if (bnx2x_test_registers(bp) != 0) {
9621 buf[0] = 1;
9622 etest->flags |= ETH_TEST_FL_FAILED;
9623 }
9624 if (bnx2x_test_memory(bp) != 0) {
9625 buf[1] = 1;
9626 etest->flags |= ETH_TEST_FL_FAILED;
9627 }
9628 buf[2] = bnx2x_test_loopback(bp, link_up);
9629 if (buf[2] != 0)
9630 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 9631
f3c87cdd
YG
9632 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9633 bnx2x_nic_load(bp, LOAD_NORMAL);
9634 /* wait until link state is restored */
9635 bnx2x_wait_for_link(bp, link_up);
9636 }
9637 if (bnx2x_test_nvram(bp) != 0) {
9638 buf[3] = 1;
a2fbb9ea
ET
9639 etest->flags |= ETH_TEST_FL_FAILED;
9640 }
f3c87cdd
YG
9641 if (bnx2x_test_intr(bp) != 0) {
9642 buf[4] = 1;
9643 etest->flags |= ETH_TEST_FL_FAILED;
9644 }
9645 if (bp->port.pmf)
9646 if (bnx2x_link_test(bp) != 0) {
9647 buf[5] = 1;
9648 etest->flags |= ETH_TEST_FL_FAILED;
9649 }
f3c87cdd
YG
9650
9651#ifdef BNX2X_EXTRA_DEBUG
9652 bnx2x_panic_dump(bp);
9653#endif
a2fbb9ea
ET
9654}
9655
de832a55
EG
9656static const struct {
9657 long offset;
9658 int size;
9659 u8 string[ETH_GSTRING_LEN];
9660} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
9661/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
9662 { Q_STATS_OFFSET32(error_bytes_received_hi),
9663 8, "[%d]: rx_error_bytes" },
9664 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
9665 8, "[%d]: rx_ucast_packets" },
9666 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
9667 8, "[%d]: rx_mcast_packets" },
9668 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
9669 8, "[%d]: rx_bcast_packets" },
9670 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
9671 { Q_STATS_OFFSET32(rx_err_discard_pkt),
9672 4, "[%d]: rx_phy_ip_err_discards"},
9673 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
9674 4, "[%d]: rx_skb_alloc_discard" },
9675 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
9676
9677/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
9678 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9679 8, "[%d]: tx_packets" }
9680};
9681
bb2a0f7a
YG
9682static const struct {
9683 long offset;
9684 int size;
9685 u32 flags;
66e855f3
YG
9686#define STATS_FLAGS_PORT 1
9687#define STATS_FLAGS_FUNC 2
de832a55 9688#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
66e855f3 9689 u8 string[ETH_GSTRING_LEN];
bb2a0f7a 9690} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
de832a55
EG
9691/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9692 8, STATS_FLAGS_BOTH, "rx_bytes" },
66e855f3 9693 { STATS_OFFSET32(error_bytes_received_hi),
de832a55 9694 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
bb2a0f7a 9695 { STATS_OFFSET32(total_unicast_packets_received_hi),
de832a55 9696 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
bb2a0f7a 9697 { STATS_OFFSET32(total_multicast_packets_received_hi),
de832a55 9698 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
bb2a0f7a 9699 { STATS_OFFSET32(total_broadcast_packets_received_hi),
de832a55 9700 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
bb2a0f7a 9701 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
66e855f3 9702 8, STATS_FLAGS_PORT, "rx_crc_errors" },
bb2a0f7a 9703 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
66e855f3 9704 8, STATS_FLAGS_PORT, "rx_align_errors" },
de832a55
EG
9705 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9706 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9707 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9708 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9709/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9710 8, STATS_FLAGS_PORT, "rx_fragments" },
9711 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9712 8, STATS_FLAGS_PORT, "rx_jabbers" },
9713 { STATS_OFFSET32(no_buff_discard_hi),
9714 8, STATS_FLAGS_BOTH, "rx_discards" },
9715 { STATS_OFFSET32(mac_filter_discard),
9716 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9717 { STATS_OFFSET32(xxoverflow_discard),
9718 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9719 { STATS_OFFSET32(brb_drop_hi),
9720 8, STATS_FLAGS_PORT, "rx_brb_discard" },
9721 { STATS_OFFSET32(brb_truncate_hi),
9722 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
9723 { STATS_OFFSET32(pause_frames_received_hi),
9724 8, STATS_FLAGS_PORT, "rx_pause_frames" },
9725 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9726 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9727 { STATS_OFFSET32(nig_timer_max),
9728 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
9729/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
9730 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
9731 { STATS_OFFSET32(rx_skb_alloc_failed),
9732 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
9733 { STATS_OFFSET32(hw_csum_err),
9734 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
9735
9736 { STATS_OFFSET32(total_bytes_transmitted_hi),
9737 8, STATS_FLAGS_BOTH, "tx_bytes" },
9738 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9739 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9740 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9741 8, STATS_FLAGS_BOTH, "tx_packets" },
9742 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9743 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9744 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9745 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
bb2a0f7a 9746 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
66e855f3 9747 8, STATS_FLAGS_PORT, "tx_single_collisions" },
bb2a0f7a 9748 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
66e855f3 9749 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
de832a55 9750/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
66e855f3 9751 8, STATS_FLAGS_PORT, "tx_deferred" },
bb2a0f7a 9752 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
66e855f3 9753 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
bb2a0f7a 9754 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
66e855f3 9755 8, STATS_FLAGS_PORT, "tx_late_collisions" },
bb2a0f7a 9756 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
66e855f3 9757 8, STATS_FLAGS_PORT, "tx_total_collisions" },
bb2a0f7a 9758 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
66e855f3 9759 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
bb2a0f7a 9760 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
66e855f3 9761 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
bb2a0f7a 9762 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
66e855f3 9763 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
bb2a0f7a 9764 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
66e855f3 9765 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
bb2a0f7a 9766 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
66e855f3 9767 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
bb2a0f7a 9768 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
66e855f3 9769 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
de832a55 9770/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
66e855f3 9771 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
de832a55
EG
9772 { STATS_OFFSET32(pause_frames_sent_hi),
9773 8, STATS_FLAGS_PORT, "tx_pause_frames" }
a2fbb9ea
ET
9774};
9775
de832a55
EG
9776#define IS_PORT_STAT(i) \
9777 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
9778#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
9779#define IS_E1HMF_MODE_STAT(bp) \
9780 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
66e855f3 9781
a2fbb9ea
ET
9782static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9783{
bb2a0f7a 9784 struct bnx2x *bp = netdev_priv(dev);
de832a55 9785 int i, j, k;
bb2a0f7a 9786
a2fbb9ea
ET
9787 switch (stringset) {
9788 case ETH_SS_STATS:
de832a55
EG
9789 if (is_multi(bp)) {
9790 k = 0;
9791 for_each_queue(bp, i) {
9792 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
9793 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
9794 bnx2x_q_stats_arr[j].string, i);
9795 k += BNX2X_NUM_Q_STATS;
9796 }
9797 if (IS_E1HMF_MODE_STAT(bp))
9798 break;
9799 for (j = 0; j < BNX2X_NUM_STATS; j++)
9800 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
9801 bnx2x_stats_arr[j].string);
9802 } else {
9803 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9804 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9805 continue;
9806 strcpy(buf + j*ETH_GSTRING_LEN,
9807 bnx2x_stats_arr[i].string);
9808 j++;
9809 }
bb2a0f7a 9810 }
a2fbb9ea
ET
9811 break;
9812
9813 case ETH_SS_TEST:
9814 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9815 break;
9816 }
9817}
9818
9819static int bnx2x_get_stats_count(struct net_device *dev)
9820{
bb2a0f7a 9821 struct bnx2x *bp = netdev_priv(dev);
de832a55 9822 int i, num_stats;
bb2a0f7a 9823
de832a55
EG
9824 if (is_multi(bp)) {
9825 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
9826 if (!IS_E1HMF_MODE_STAT(bp))
9827 num_stats += BNX2X_NUM_STATS;
9828 } else {
9829 if (IS_E1HMF_MODE_STAT(bp)) {
9830 num_stats = 0;
9831 for (i = 0; i < BNX2X_NUM_STATS; i++)
9832 if (IS_FUNC_STAT(i))
9833 num_stats++;
9834 } else
9835 num_stats = BNX2X_NUM_STATS;
bb2a0f7a 9836 }
de832a55 9837
bb2a0f7a 9838 return num_stats;
a2fbb9ea
ET
9839}
9840
9841static void bnx2x_get_ethtool_stats(struct net_device *dev,
9842 struct ethtool_stats *stats, u64 *buf)
9843{
9844 struct bnx2x *bp = netdev_priv(dev);
de832a55
EG
9845 u32 *hw_stats, *offset;
9846 int i, j, k;
bb2a0f7a 9847
de832a55
EG
9848 if (is_multi(bp)) {
9849 k = 0;
9850 for_each_queue(bp, i) {
9851 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
9852 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
9853 if (bnx2x_q_stats_arr[j].size == 0) {
9854 /* skip this counter */
9855 buf[k + j] = 0;
9856 continue;
9857 }
9858 offset = (hw_stats +
9859 bnx2x_q_stats_arr[j].offset);
9860 if (bnx2x_q_stats_arr[j].size == 4) {
9861 /* 4-byte counter */
9862 buf[k + j] = (u64) *offset;
9863 continue;
9864 }
9865 /* 8-byte counter */
9866 buf[k + j] = HILO_U64(*offset, *(offset + 1));
9867 }
9868 k += BNX2X_NUM_Q_STATS;
9869 }
9870 if (IS_E1HMF_MODE_STAT(bp))
9871 return;
9872 hw_stats = (u32 *)&bp->eth_stats;
9873 for (j = 0; j < BNX2X_NUM_STATS; j++) {
9874 if (bnx2x_stats_arr[j].size == 0) {
9875 /* skip this counter */
9876 buf[k + j] = 0;
9877 continue;
9878 }
9879 offset = (hw_stats + bnx2x_stats_arr[j].offset);
9880 if (bnx2x_stats_arr[j].size == 4) {
9881 /* 4-byte counter */
9882 buf[k + j] = (u64) *offset;
9883 continue;
9884 }
9885 /* 8-byte counter */
9886 buf[k + j] = HILO_U64(*offset, *(offset + 1));
a2fbb9ea 9887 }
de832a55
EG
9888 } else {
9889 hw_stats = (u32 *)&bp->eth_stats;
9890 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9891 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9892 continue;
9893 if (bnx2x_stats_arr[i].size == 0) {
9894 /* skip this counter */
9895 buf[j] = 0;
9896 j++;
9897 continue;
9898 }
9899 offset = (hw_stats + bnx2x_stats_arr[i].offset);
9900 if (bnx2x_stats_arr[i].size == 4) {
9901 /* 4-byte counter */
9902 buf[j] = (u64) *offset;
9903 j++;
9904 continue;
9905 }
9906 /* 8-byte counter */
9907 buf[j] = HILO_U64(*offset, *(offset + 1));
bb2a0f7a 9908 j++;
a2fbb9ea 9909 }
a2fbb9ea
ET
9910 }
9911}
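/* 8-byte counters are laid out as two consecutive u32s with the high
 * word first (the *_hi fields), which is why the loops above reassemble
 * them with HILO_U64(*offset, *(offset + 1)),
 * i.e. ((u64)hi << 32) + lo. */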
9912
9913static int bnx2x_phys_id(struct net_device *dev, u32 data)
9914{
9915 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9916 int port = BP_PORT(bp);
a2fbb9ea
ET
9917 int i;
9918
34f80b04
EG
9919 if (!netif_running(dev))
9920 return 0;
9921
9922 if (!bp->port.pmf)
9923 return 0;
9924
a2fbb9ea
ET
9925 if (data == 0)
9926 data = 2;
9927
9928 for (i = 0; i < (data * 2); i++) {
c18487ee 9929 if ((i % 2) == 0)
34f80b04 9930 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
c18487ee
YR
9931 bp->link_params.hw_led_mode,
9932 bp->link_params.chip_id);
9933 else
34f80b04 9934 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
c18487ee
YR
9935 bp->link_params.hw_led_mode,
9936 bp->link_params.chip_id);
9937
a2fbb9ea
ET
9938 msleep_interruptible(500);
9939 if (signal_pending(current))
9940 break;
9941 }
9942
c18487ee 9943 if (bp->link_vars.link_up)
34f80b04 9944 bnx2x_set_led(bp, port, LED_MODE_OPER,
c18487ee
YR
9945 bp->link_vars.line_speed,
9946 bp->link_params.hw_led_mode,
9947 bp->link_params.chip_id);
a2fbb9ea
ET
9948
9949 return 0;
9950}
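/* Timing note: the loop above runs data*2 half-second steps, so `data`
 * is the blink duration in seconds at roughly 1 Hz (500 ms in
 * LED_MODE_OPER, 500 ms in LED_MODE_OFF); data == 0 defaults to
 * 2 seconds, and the LED is restored to the link state afterwards. */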
9951
9952static struct ethtool_ops bnx2x_ethtool_ops = {
7a9b2557
VZ
9953 .get_settings = bnx2x_get_settings,
9954 .set_settings = bnx2x_set_settings,
9955 .get_drvinfo = bnx2x_get_drvinfo,
a2fbb9ea
ET
9956 .get_wol = bnx2x_get_wol,
9957 .set_wol = bnx2x_set_wol,
7a9b2557
VZ
9958 .get_msglevel = bnx2x_get_msglevel,
9959 .set_msglevel = bnx2x_set_msglevel,
9960 .nway_reset = bnx2x_nway_reset,
9961 .get_link = ethtool_op_get_link,
9962 .get_eeprom_len = bnx2x_get_eeprom_len,
9963 .get_eeprom = bnx2x_get_eeprom,
9964 .set_eeprom = bnx2x_set_eeprom,
9965 .get_coalesce = bnx2x_get_coalesce,
9966 .set_coalesce = bnx2x_set_coalesce,
9967 .get_ringparam = bnx2x_get_ringparam,
9968 .set_ringparam = bnx2x_set_ringparam,
9969 .get_pauseparam = bnx2x_get_pauseparam,
9970 .set_pauseparam = bnx2x_set_pauseparam,
9971 .get_rx_csum = bnx2x_get_rx_csum,
9972 .set_rx_csum = bnx2x_set_rx_csum,
9973 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 9974 .set_tx_csum = ethtool_op_set_tx_hw_csum,
7a9b2557
VZ
9975 .set_flags = bnx2x_set_flags,
9976 .get_flags = ethtool_op_get_flags,
9977 .get_sg = ethtool_op_get_sg,
9978 .set_sg = ethtool_op_set_sg,
a2fbb9ea
ET
9979 .get_tso = ethtool_op_get_tso,
9980 .set_tso = bnx2x_set_tso,
9981 .self_test_count = bnx2x_self_test_count,
7a9b2557
VZ
9982 .self_test = bnx2x_self_test,
9983 .get_strings = bnx2x_get_strings,
a2fbb9ea
ET
9984 .phys_id = bnx2x_phys_id,
9985 .get_stats_count = bnx2x_get_stats_count,
bb2a0f7a 9986 .get_ethtool_stats = bnx2x_get_ethtool_stats,
a2fbb9ea
ET
9987};
9988
9989/* end of ethtool_ops */
9990
9991/****************************************************************************
9992* General service functions
9993****************************************************************************/
9994
9995static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9996{
9997 u16 pmcsr;
9998
9999 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10000
10001 switch (state) {
10002 case PCI_D0:
34f80b04 10003 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
a2fbb9ea
ET
10004 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10005 PCI_PM_CTRL_PME_STATUS));
10006
10007 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
33471629 10008 /* delay required during transition out of D3hot */
a2fbb9ea 10009 msleep(20);
34f80b04 10010 break;
a2fbb9ea 10011
34f80b04
EG
10012 case PCI_D3hot:
10013 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10014 pmcsr |= 3;
a2fbb9ea 10015
34f80b04
EG
10016 if (bp->wol)
10017 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 10018
34f80b04
EG
10019 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10020 pmcsr);
a2fbb9ea 10021
34f80b04
EG
10022 /* No more memory access after this point until
10023 * device is brought back to D0.
10024 */
10025 break;
10026
10027 default:
10028 return -EINVAL;
10029 }
10030 return 0;
a2fbb9ea
ET
10031}
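/* Note: writing 3 into the PCI_PM_CTRL_STATE_MASK field selects D3hot,
 * and PCI_PM_CTRL_PME_ENABLE is set only when WoL is armed, so a magic
 * packet can still wake the sleeping function.  The D0 path instead
 * clears the state field and acks any pending PME status, with a 20 ms
 * delay required when coming out of D3hot. */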
10032
237907c1
EG
10033static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10034{
10035 u16 rx_cons_sb;
10036
10037 /* Tell compiler that status block fields can change */
10038 barrier();
10039 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10040 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10041 rx_cons_sb++;
10042 return (fp->rx_comp_cons != rx_cons_sb);
10043}
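/* The RCQ uses the last descriptor of each page as a link to the next
 * page rather than as a real completion, so when the status-block index
 * lands on that slot ((rx_cons_sb & MAX_RCQ_DESC_CNT) ==
 * MAX_RCQ_DESC_CNT) it is bumped past the placeholder before being
 * compared with rx_comp_cons. */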
10044
34f80b04
EG
10045/*
10046 * net_device service functions
10047 */
10048
a2fbb9ea
ET
10049static int bnx2x_poll(struct napi_struct *napi, int budget)
10050{
10051 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10052 napi);
10053 struct bnx2x *bp = fp->bp;
10054 int work_done = 0;
10055
10056#ifdef BNX2X_STOP_ON_ERROR
10057 if (unlikely(bp->panic))
34f80b04 10058 goto poll_panic;
a2fbb9ea
ET
10059#endif
10060
10061 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
10062 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10063 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10064
10065 bnx2x_update_fpsb_idx(fp);
10066
237907c1 10067 if (bnx2x_has_tx_work(fp))
a2fbb9ea
ET
10068 bnx2x_tx_int(fp, budget);
10069
237907c1 10070 if (bnx2x_has_rx_work(fp))
a2fbb9ea 10071 work_done = bnx2x_rx_int(fp, budget);
356e2385 10072
da5a662a 10073 rmb(); /* BNX2X_HAS_WORK() reads the status block */
a2fbb9ea
ET
10074
10075 /* must not complete if we consumed full budget */
da5a662a 10076 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
a2fbb9ea
ET
10077
10078#ifdef BNX2X_STOP_ON_ERROR
34f80b04 10079poll_panic:
a2fbb9ea 10080#endif
288379f0 10081 napi_complete(napi);
a2fbb9ea 10082
0626b899 10083 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
a2fbb9ea 10084 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
0626b899 10085 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
a2fbb9ea
ET
10086 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10087 }
356e2385 10088
a2fbb9ea
ET
10089 return work_done;
10090}
10091
755735eb
EG
10092
10093/* we split the first BD into headers and data BDs
33471629 10094 * to ease the pain of our fellow microcode engineers;
755735eb 10095 * we use one mapping for both BDs.
10096 * So far this has only been observed to happen
10097 * in Other Operating Systems(TM)
10098 */
10099static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10100 struct bnx2x_fastpath *fp,
10101 struct eth_tx_bd **tx_bd, u16 hlen,
10102 u16 bd_prod, int nbd)
10103{
10104 struct eth_tx_bd *h_tx_bd = *tx_bd;
10105 struct eth_tx_bd *d_tx_bd;
10106 dma_addr_t mapping;
10107 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10108
10109 /* first fix first BD */
10110 h_tx_bd->nbd = cpu_to_le16(nbd);
10111 h_tx_bd->nbytes = cpu_to_le16(hlen);
10112
10113 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10114 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10115 h_tx_bd->addr_lo, h_tx_bd->nbd);
10116
10117 /* now get a new data BD
10118 * (after the pbd) and fill it */
10119 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10120 d_tx_bd = &fp->tx_desc_ring[bd_prod];
10121
10122 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10123 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10124
10125 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10126 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10127 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10128 d_tx_bd->vlan = 0;
10129 /* this marks the BD as one that has no individual mapping
10130 * the FW ignores this flag in a BD not marked start
10131 */
10132 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
10133 DP(NETIF_MSG_TX_QUEUED,
10134 "TSO split data size is %d (%x:%x)\n",
10135 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10136
10137 /* update tx_bd for marking the last BD flag */
10138 *tx_bd = d_tx_bd;
10139
10140 return bd_prod;
10141}
10142
10143static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10144{
10145 if (fix > 0)
10146 csum = (u16) ~csum_fold(csum_sub(csum,
10147 csum_partial(t_header - fix, fix, 0)));
10148
10149 else if (fix < 0)
10150 csum = (u16) ~csum_fold(csum_add(csum,
10151 csum_partial(t_header, -fix, 0)));
10152
10153 return swab16(csum);
10154}
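/* bnx2x_csum_fix() compensates when the checksum supplied by the stack
 * was computed over `fix` bytes more (fix > 0) or fewer (fix < 0) than
 * the region the hardware will cover: the partial sum of the surplus
 * bytes is subtracted (or that of the missing bytes added) before
 * folding, and swab16() returns the result in the byte order the
 * parsing BD expects. */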
10155
10156static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10157{
10158 u32 rc;
10159
10160 if (skb->ip_summed != CHECKSUM_PARTIAL)
10161 rc = XMIT_PLAIN;
10162
10163 else {
4781bfad 10164 if (skb->protocol == htons(ETH_P_IPV6)) {
755735eb
EG
10165 rc = XMIT_CSUM_V6;
10166 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10167 rc |= XMIT_CSUM_TCP;
10168
10169 } else {
10170 rc = XMIT_CSUM_V4;
10171 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10172 rc |= XMIT_CSUM_TCP;
10173 }
10174 }
10175
10176 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10177 rc |= XMIT_GSO_V4;
10178
10179 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10180 rc |= XMIT_GSO_V6;
10181
10182 return rc;
10183}
10184
#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented);
   no need to check fragmentation if page size > 8K (there will be no
   violation of FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB */
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
				    skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}

		} else {
			/* in the non-LSO case a too fragmented packet
			   should always be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d  hlen %d  first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif

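/*
 * Illustrative sketch (an editor's addition): the sliding-window test
 * bnx2x_pkt_req_lin() implements. The FW fetches at most wnd_size BDs
 * ahead, so every run of wnd_size consecutive fragments must together
 * carry at least one MSS of payload, or the skb has to be linearized.
 * Standalone C over a plain array of fragment sizes (the driver's
 * version also folds the skb's linear data into the first window):
 *
 *	static int needs_linearize(const unsigned int *frag, int nfrags,
 *				   int wnd_size, unsigned int lso_mss)
 *	{
 *		unsigned int wnd_sum = 0;
 *		int i;
 *
 *		if (nfrags < wnd_size)
 *			return 0;
 *
 *		for (i = 0; i < wnd_size; i++)	// first window
 *			wnd_sum += frag[i];
 *		if (wnd_sum < lso_mss)
 *			return 1;
 *
 *		for (i = wnd_size; i < nfrags; i++) {	// slide by one
 *			wnd_sum += frag[i] - frag[i - wnd_size];
 *			if (wnd_sum < lso_mss)
 *				return 1;
 *		}
 *		return 0;
 *	}
 */
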
/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int vlan_off = (bp->e1hov ? 4 : 0);
	int i;
	u8 hlen = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
	   "  gso type %x  xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation of FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	Please read carefully. First we use one BD which we mark as start,
	then for TSO or xsum we have a parsing info BD,
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pbd sizes are in words - NOT DWORDS!
	*/

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_bd = &fp->tx_desc_ring[bd_prod];

	tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_bd->general_data = (UNICAST_ADDRESS <<
			       ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);

#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
		vlan_off += 4;
	} else
#endif
		tx_bd->vlan = cpu_to_le16(pkt_prod);

	if (xmit_type) {
		/* turn on parsing and get a BD */
		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		pbd = (void *)&fp->tx_desc_ring[bd_prod];

		memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
	}

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data =
			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2 - vlan_off;

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
			pbd->cs_offset = fix / 2;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d  offset %d  fix %d  csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
			   SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
	tx_bd->nbd = cpu_to_le16(nbd);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
	   "  nbytes %d  flags %x  vlan %x\n",
	   tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
	   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
	   le16_to_cpu(tx_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
						 bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_bd = &fp->tx_desc_ring[bd_prod];

		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_bd->nbytes = cpu_to_le16(frag->size);
		tx_bd->vlan = cpu_to_le16(pkt_prod);
		tx_bd->bd_flags.as_bitfield = 0;

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
		   i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
		   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
	}

	/* now at last mark the BD as the last BD */
	tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
	   tx_bd, tx_bd->bd_flags.as_bitfield);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
		   "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes packets must have BDs.
	 */
	wmb();

	le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
	DOORBELL(bp, fp->index, 0);

	mmiowb();

	fp->tx_bd_prod += nbd;
	dev->trans_start = jiffies;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
		   if we put Tx into XOFF state. */
		smp_mb();
		netif_tx_stop_queue(txq);
		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}

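/*
 * Illustrative sketch (an editor's addition): the BD accounting used
 * by bnx2x_start_xmit() above. A packet takes one start BD, a parse
 * BD when checksum offload or TSO is requested, one BD per page
 * fragment, and one extra data BD when bnx2x_tx_split() peels the
 * headers off a TSO packet:
 *
 *	static int tx_bds_needed(int nr_frags, int has_pbd, int tso_split)
 *	{
 *		int nbd = nr_frags + (has_pbd ? 2 : 1);
 *
 *		if (tso_split)	// extra data BD from bnx2x_tx_split()
 *			nbd++;
 *		return nbd;
 *	}
 *
 * e.g. a TSO skb with 4 fragments whose headers are split off needs
 * tx_bds_needed(4, 1, 1) == 7 BDs, before the extra BD counted just
 * ahead of the doorbell for packets that cross a BD page.
 */
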
/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

/* called with netif_tx_lock from dev_mcast.c */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
					cpu_to_le16(port);
				config->config_table[i].target_table_entry.flags = 0;
				config->config_table[i].target_table_entry.client_id = 0;
				config->config_table[i].target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].cam_entry.msb_mac_addr,
				   config->config_table[i].cam_entry.middle_mac_addr,
				   config->config_table[i].cam_entry.lsb_mac_addr);
			}
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}

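/*
 * Illustrative sketch (an editor's addition): how the E1H path above
 * maps a multicast MAC to one bit of the 256-bit MC hash. A CRC32C of
 * the 6-byte address is taken, its top byte selects the bit, and that
 * bit index is split into a 32-bit register index plus an offset. The
 * crc32c() below is a plain bitwise, reflected (poly 0x1EDC6F41)
 * stand-in; the kernel's crc32c_le() seeding conventions may differ:
 *
 *	#include <stdint.h>
 *	#include <stddef.h>
 *
 *	static uint32_t crc32c(uint32_t crc, const uint8_t *p, size_t len)
 *	{
 *		size_t i;
 *		int k;
 *
 *		for (i = 0; i < len; i++) {
 *			crc ^= p[i];
 *			for (k = 0; k < 8; k++)
 *				crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
 *		}
 *		return crc;
 *	}
 *
 *	static void mc_hash_set(uint32_t mc_filter[8], const uint8_t mac[6])
 *	{
 *		uint32_t bit = (crc32c(0, mac, 6) >> 24) & 0xff;
 *
 *		mc_filter[bit >> 5] |= 1u << (bit & 0x1f);
 *	}
 */
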
/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	return 0;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->port.phy_addr;

		/* fallthrough */

	case SIOCGMIIREG: {
		u16 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
				      DEFAULT_PHY_DEV_ADDR,
				      (data->reg_num & 0x1f), &mii_regval);
		data->val_out = mii_regval;
		mutex_unlock(&bp->port.phy_mutex);
		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
				       DEFAULT_PHY_DEV_ADDR,
				       (data->reg_num & 0x1f), data->val_in);
		mutex_unlock(&bp->port.phy_mutex);
		return err;

	default:
		/* do nothing */
		break;
	}

	return -EOPNOTSUPP;
}

/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}

#endif

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};

static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
#endif
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}

/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}

static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	return pci_register_driver(&bnx2x_pci_driver);
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);