/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>


#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.48.102"
#define DRV_MODULE_RELDATE	"2009/02/12"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

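/* The parameters above are set at module load time; for example
 * (illustrative values only):
 *   modprobe bnx2x int_mode=2 disable_tpa=1 debug=0x1f
 * would force MSI interrupts, disable TPA (LRO) and raise the default
 * debug msglevel.
 */
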
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

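/* Note: indirect access works through a window in PCI config space.
 * Roughly, the target GRC address is programmed via PCICFG_GRC_ADDRESS,
 * the data moves through PCICFG_GRC_DATA, and the window is parked back
 * at PCICFG_VENDOR_ID_OFFSET afterwards (reads below follow the same
 * pattern as the write helper above).
 */
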
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

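/* DMAE is the chip's DMA engine, used to copy blocks between host memory
 * and device (GRC) address space.  In outline: a command is written into
 * the DMAE command memory and then kicked via one of the per-channel
 * "GO" registers listed below.
 */
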
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

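/* bnx2x_write_dmae() and bnx2x_read_dmae() below share one completion
 * protocol: the caller zeroes the wb_comp word, posts the command, then
 * polls until the engine writes DMAE_COMP_VAL back (up to ~200
 * iterations before declaring a timeout).  bp->dmae_mutex serializes
 * users of the single init_dmae command buffer.
 */
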
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

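/* Each of the four STORM processors (X/T/C/U) keeps an assert list in
 * its internal memory.  bnx2x_mc_assert() below walks those lists,
 * prints every valid entry and returns the number of asserts found.
 */
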
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("queue[%d]: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
			  " bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

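/* bnx2x_update_fpsb_idx() samples the fastpath status block indices that
 * the chip writes via DMA.  The compiler barrier forces a fresh read of
 * the DMA'd status block; the return value encodes which indices changed
 * (bit 0 for the CSTORM index, bit 1 for the USTORM index).
 */
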
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

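/* Ring-occupancy arithmetic for bnx2x_tx_avail(), roughly: with
 * prod = 110 and cons = 100, used = 10 + NUM_TX_RINGS, because one
 * "next-page" BD per ring page can never carry data and is counted as
 * a worst-case allowance.  SUB_S16() keeps the subtraction correct
 * across 16-bit wrap-around.
 */
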
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		__netif_tx_lock(txq, smp_processor_id());

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped(). Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}


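/* Slowpath commands ("ramrods") are posted to the chip on the slowpath
 * queue (SPQ); their completions come back as CQEs on the Rx completion
 * queue and are dispatched here to advance the fp/bp state machines.
 * Each completion also returns one SPQ credit (bp->spq_left++).
 */
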
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

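/* Note: each Rx queue effectively uses two rings - the BD ring of skbs
 * for headers/linear data (allocated below) and the SGE page ring
 * handled above, which supplies full pages that TPA aggregation fills
 * with payload.
 */
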
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

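/* TPA (transparent packet aggregation) lifecycle, roughly: a TPA_START
 * CQE parks the current skb in a per-queue "bin" (tpa_pool) while the
 * chip keeps appending payload into SGE pages; the matching TPA_END CQE
 * triggers bnx2x_tpa_stop(), which attaches the pages as frags and
 * hands the completed skb to the stack.
 */
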
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that the "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take the VLAN tag into account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

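/* bnx2x_rx_int() below is the NAPI Rx poll body: in outline, it walks
 * the completion queue for up to "budget" packets, recycling or
 * replenishing BDs as it goes, and finally publishes the new producer
 * values to the chip via bnx2x_update_rx_prod().
 */
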
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If the CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is the size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return -EINVAL;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = fp->index;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	napi_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}

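/* In INTx/MSI mode the ack register returns a bitmask: bit 0 signals
 * the default (slowpath) status block and bit (sb_id + 1) a fastpath
 * status block, hence the "0x2 << sb_id" mask used below.
 */
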
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}


	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */

bb2a0f7a 1753static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1754
/* Link */

/*
 * General service functions
 */

static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds, polling every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
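
/*
 * The two helpers above bracket a short critical section on a resource
 * shared with the MCP or the other port.  A minimal usage sketch
 * (HW_LOCK_RESOURCE_GPIO is one of the resource IDs actually used below;
 * the body of the critical section is hypothetical):
 *
 *	if (!bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO)) {
 *		... read-modify-write the shared register ...
 *		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	}
 */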

/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);

	return value;
}
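
/*
 * Worked example of the swap logic above (a sketch of the computation,
 * not hardware documentation): when both NIG_REG_PORT_SWAP and
 * NIG_REG_STRAP_OVERRIDE read non-zero, the XOR flips the port, so a
 * request for GPIO 2 of port 0 actually reads bit
 * (2 + MISC_REGISTERS_GPIO_PORT_SHIFT); with the swap inactive it reads
 * bit 2 directly.  The same shift is reused by the setters below.
 */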

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
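
/*
 * The GPIO/SPIO setters above all follow the same register convention
 * (as used here, not separately documented): each pin has FLOAT, SET and
 * CLR bit fields at fixed positions within a single register.  Clearing
 * FLOAT and raising SET drives the pin high, clearing FLOAT and raising
 * CLR drives it low, and raising FLOAT alone turns the pin back into a
 * high-impedance input.
 */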

static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}

static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}

static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (IS_E1HMF(bp))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
		else if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
	return -EINVAL;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - cannot set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - cannot reset link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}

static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur.
	   The 1.25 coefficient makes the threshold a little bigger than
	   the real time, to compensate for timer inaccuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
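
/*
 * Worked numbers for the port min/max init above (a sketch, assuming
 * line_speed is reported in Mbps as elsewhere in this file): at 10G,
 * line_speed is 10000, so r_param = 10000 / 8 = 1250 bytes/usec, the
 * fairness timer resolution is QM_ARB_BYTES / 1250 usec, and
 * t_fair = T_FAIR_COEF / 10000 - i.e. the 1000 usec figure the comment
 * above quotes for 10G; at 1G the same formula yields 10000 usec.
 */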

static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If fairness is enabled (not all min rates are zeroes) and
		   if the current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if (bp->vn_weight_sum && (vn_min_rate == 0))
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate=%d  vn_max_rate=%d  vn_weight_sum=%d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max((u32)(vn_min_rate * (T_FAIR_COEF /
						 (8 * bp->vn_weight_sum))),
			    (u32)(bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
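
/*
 * Example of the quota computation above (a sketch): with a vn_max_rate
 * of 10000 Mbps and the 100 usec RS_PERIODIC_TIMEOUT_USEC quoted in
 * bnx2x_init_port_minmax(), the quota comes out to
 * 10000 * 100 / 8 = 125000 bytes per rate-shaping period, i.e. the
 * number of bytes this vn may transmit in one period at line rate.
 */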

/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp)) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED))
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);

			/* Set the attention towards other drivers
			   on the same port */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}

static void bnx2x__link_status_update(struct bnx2x *bp)
{
	if (bp->state != BNX2X_STATE_OPEN)
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}
/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* the slow path queue is odd since completions arrive on the fastpath ring */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	int func = BP_FUNC(bp);

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	/* CID needs the port number to be encoded in it */
	bp->spq_prod_bd->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		bp->spq_prod_bd->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");

	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);

	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
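
/*
 * A note on the producer wrap above (describing only what the code does):
 * the SPQ is a linear array walked by bp->spq_prod_bd; when the producer
 * reaches bp->spq_last_bd it is reset to the base of the ring, and the
 * producer index written to XSTORM_SPQ_PROD_OFFSET is what tells the
 * firmware how far the driver has advanced.
 */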

/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 i, j, val;
	int rc = 0;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}

/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	u32 val = 0;

	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
}

static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}
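
/*
 * The return value above is a bitmask of which default status block
 * indices moved since the last poll: bit 0 - attention bits, bit 1 -
 * CSTORM, bit 2 - USTORM, bit 3 - XSTORM, bit 4 - TSTORM.  Bit 0 is the
 * one bnx2x_sp_task() tests before dispatching attention handling.
 */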

/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}

static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			/* Fan failure attention */

			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* mark the failure */
			bp->link_params.ext_phy_config &=
					~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			bp->link_params.ext_phy_config |=
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
			SHMEM_WR(bp,
				 dev_info.port_hw_config[port].
							external_phy_config,
				 bp->link_params.ext_phy_config);
			/* log the failure */
			printk(KERN_ERR PFX "Fan Failure on Network"
			       " Controller %s has caused the driver to"
			       " shutdown the card to prevent permanent"
			       " damage. Please contact Dell Support for"
			       " assistance\n", bp->dev->name);
			break;

		default:
			break;
		}
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bnx2x__link_status_update(bp);
			if (SHMEM_RD(bp, func_mb[func].drv_status) &
							DRV_STATUS_PMF)
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}
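
/*
 * Truth-table sketch for the derivation above: a bit is "asserted" when
 * the chip raised it (attn_bits = 1) but it is neither acknowledged
 * (attn_ack = 0) nor recorded in attn_state; it is "deasserted" when the
 * chip dropped it (attn_bits = 0) while both attn_ack and attn_state
 * still show 1.  Any other disagreement between the three words is the
 * "BAD attention state" reported above.
 */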

static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}

static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}

/* end of slow path */

/* Statistics */

/****************************************************************************
* Macros
****************************************************************************/

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)
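
/*
 * Worked example of the ADD_64 carry above: adding a_lo = 1 to
 * s_lo = 0xffffffff wraps s_lo to 0; since the new s_lo (0) is less than
 * a_lo (1), the "(s_lo < a_lo) ? 1 : 0" term carries the 1 into s_hi.
 * DIFF_64 is the mirror image: when m_lo < s_lo it borrows 1 from the
 * high word via "m_lo + (UINT_MAX - s_lo) + 1".
 */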

#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
		old_tclient->s = tclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		old_uclient->s = uclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
		old_xclient->s = xclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)

/* minuend[hi:lo] -= subtrahend */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
	do { \
		SUB_64(m_hi, 0, m_lo, s); \
	} while (0)

#define SUB_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/*
 * General service functions
 */

static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}
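
/*
 * Caveat on bnx2x_hilo() above: on a 32-bit kernel only the low word is
 * returned, so callers see a truncated value once a counter crosses 4G;
 * on 64-bit builds HILO_U64() reconstructs the full hi:lo pair.
 */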

/*
 * Init service functions
 */

static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int i, rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
		for_each_queue(bp, i)
			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}

static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port stats */
	if (!BP_NOMCP(bp))
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
	else
		bp->port.port_stx = 0;
	DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);

	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;
	if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}
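
/*
 * A note on the "loader" above (a sketch of the mechanism as coded here,
 * not a hardware specification): rather than the CPU posting every queued
 * statistics command itself, the loader DMAE copies a queued dmae_command
 * from host memory into DMAE command memory, and its completion value is
 * written to a dmae_reg_go_c[] doorbell, so the hardware itself triggers
 * execution of the copied command.
 */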

static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}

/*
 * Statistics service functions
 */

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
3275
3276static void bnx2x_port_stats_init(struct bnx2x *bp)
a2fbb9ea
ET
3277{
3278 struct dmae_command *dmae;
34f80b04 3279 int port = BP_PORT(bp);
bb2a0f7a 3280 int vn = BP_E1HVN(bp);
a2fbb9ea 3281 u32 opcode;
bb2a0f7a 3282 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3283 u32 mac_addr;
bb2a0f7a
YG
3284 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3285
3286 /* sanity */
3287 if (!bp->link_vars.link_up || !bp->port.pmf) {
3288 BNX2X_ERR("BUG!\n");
3289 return;
3290 }
a2fbb9ea
ET
3291
3292 bp->executer_idx = 0;
bb2a0f7a
YG
3293
3294 /* MCP */
3295 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3296 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3297 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3298#ifdef __BIG_ENDIAN
bb2a0f7a 3299 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3300#else
bb2a0f7a 3301 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3302#endif
bb2a0f7a
YG
3303 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3304 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3305
bb2a0f7a 3306 if (bp->port.port_stx) {
a2fbb9ea
ET
3307
3308 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3309 dmae->opcode = opcode;
bb2a0f7a
YG
3310 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3311 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3312 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3313 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3314 dmae->len = sizeof(struct host_port_stats) >> 2;
3315 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3316 dmae->comp_addr_hi = 0;
3317 dmae->comp_val = 1;
a2fbb9ea
ET
3318 }
3319
bb2a0f7a
YG
3320 if (bp->func_stx) {
3321
3322 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3323 dmae->opcode = opcode;
3324 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3325 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3326 dmae->dst_addr_lo = bp->func_stx >> 2;
3327 dmae->dst_addr_hi = 0;
3328 dmae->len = sizeof(struct host_func_stats) >> 2;
3329 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3330 dmae->comp_addr_hi = 0;
3331 dmae->comp_val = 1;
a2fbb9ea
ET
3332 }
3333
bb2a0f7a 3334 /* MAC */
a2fbb9ea
ET
3335 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3336 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3337 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3338#ifdef __BIG_ENDIAN
3339 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3340#else
3341 DMAE_CMD_ENDIANITY_DW_SWAP |
3342#endif
bb2a0f7a
YG
3343 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3344 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3345
c18487ee 3346 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
a2fbb9ea
ET
3347
3348 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3349 NIG_REG_INGRESS_BMAC0_MEM);
3350
3351 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3352 BIGMAC_REGISTER_TX_STAT_GTBYT */
3353 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3354 dmae->opcode = opcode;
3355 dmae->src_addr_lo = (mac_addr +
3356 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3357 dmae->src_addr_hi = 0;
3358 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3359 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3360 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3361 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3362 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3363 dmae->comp_addr_hi = 0;
3364 dmae->comp_val = 1;
3365
3366 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3367 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3368 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3369 dmae->opcode = opcode;
3370 dmae->src_addr_lo = (mac_addr +
3371 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3372 dmae->src_addr_hi = 0;
3373 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3374 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 3375 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3376 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea
ET
3377 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3378 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3379 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3380 dmae->comp_addr_hi = 0;
3381 dmae->comp_val = 1;
3382
c18487ee 3383 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
a2fbb9ea
ET
3384
3385 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3386
3387 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3388 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3389 dmae->opcode = opcode;
3390 dmae->src_addr_lo = (mac_addr +
3391 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3392 dmae->src_addr_hi = 0;
3393 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3394 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3395 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3396 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3397 dmae->comp_addr_hi = 0;
3398 dmae->comp_val = 1;
3399
3400 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3401 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3402 dmae->opcode = opcode;
3403 dmae->src_addr_lo = (mac_addr +
3404 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3405 dmae->src_addr_hi = 0;
3406 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3407 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 3408 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3409 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea
ET
3410 dmae->len = 1;
3411 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3412 dmae->comp_addr_hi = 0;
3413 dmae->comp_val = 1;
3414
3415 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3416 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3417 dmae->opcode = opcode;
3418 dmae->src_addr_lo = (mac_addr +
3419 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3420 dmae->src_addr_hi = 0;
3421 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3422 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 3423 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3424 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea
ET
3425 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3426 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3427 dmae->comp_addr_hi = 0;
3428 dmae->comp_val = 1;
3429 }
3430
3431 /* NIG */
bb2a0f7a
YG
3432 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3433 dmae->opcode = opcode;
3434 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3435 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3436 dmae->src_addr_hi = 0;
3437 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3438 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3439 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3440 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3441 dmae->comp_addr_hi = 0;
3442 dmae->comp_val = 1;
3443
3444 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3445 dmae->opcode = opcode;
3446 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3447 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3448 dmae->src_addr_hi = 0;
3449 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3450 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3451 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3452 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3453 dmae->len = (2*sizeof(u32)) >> 2;
3454 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3455 dmae->comp_addr_hi = 0;
3456 dmae->comp_val = 1;
3457
3458 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3459 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3460 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3461 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3462#ifdef __BIG_ENDIAN
3463 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3464#else
3465 DMAE_CMD_ENDIANITY_DW_SWAP |
3466#endif
3467 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3468 (vn << DMAE_CMD_E1HVN_SHIFT));
3469 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3470 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
a2fbb9ea 3471 dmae->src_addr_hi = 0;
3472 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3473 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3474 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3475 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3476 dmae->len = (2*sizeof(u32)) >> 2;
3477 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3478 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3479 dmae->comp_val = DMAE_COMP_VAL;
3480
3481 *stats_comp = 0;
3482}
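
/* A note on the chain built above: each intermediate DMAE command
 * completes into the next "go" register (dmae_reg_go_c[loader_idx]),
 * so the hardware walks the whole command list by itself and only the
 * final command writes DMAE_COMP_VAL into the stats_comp word in host
 * memory.  A minimal sketch of polling for that last completion - the
 * driver's own bnx2x_stats_comp(), defined earlier, does the
 * equivalent (helper name below is hypothetical):
 */
static int bnx2x_poll_stats_comp(struct bnx2x *bp)
{
        u32 *stats_comp = bnx2x_sp(bp, stats_comp);
        int cnt = 10;

        while (*stats_comp != DMAE_COMP_VAL) {
                if (!cnt--)
                        return -EBUSY;  /* chain never finished */
                msleep(1);
        }
        return 0;
}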
3483
bb2a0f7a 3484static void bnx2x_func_stats_init(struct bnx2x *bp)
a2fbb9ea 3485{
3486 struct dmae_command *dmae = &bp->stats_dmae;
3487 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3488
3489 /* sanity */
3490 if (!bp->func_stx) {
3491 BNX2X_ERR("BUG!\n");
3492 return;
3493 }
a2fbb9ea 3494
3495 bp->executer_idx = 0;
3496 memset(dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 3497
3498 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3499 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3500 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3501#ifdef __BIG_ENDIAN
3502 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3503#else
3504 DMAE_CMD_ENDIANITY_DW_SWAP |
3505#endif
3506 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3507 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3508 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3509 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3510 dmae->dst_addr_lo = bp->func_stx >> 2;
3511 dmae->dst_addr_hi = 0;
3512 dmae->len = sizeof(struct host_func_stats) >> 2;
3513 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3514 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3515 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3516
3517 *stats_comp = 0;
3518}
a2fbb9ea 3519
3520static void bnx2x_stats_start(struct bnx2x *bp)
3521{
3522 if (bp->port.pmf)
3523 bnx2x_port_stats_init(bp);
3524
3525 else if (bp->func_stx)
3526 bnx2x_func_stats_init(bp);
3527
3528 bnx2x_hw_stats_post(bp);
3529 bnx2x_storm_stats_post(bp);
3530}
3531
3532static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3533{
3534 bnx2x_stats_comp(bp);
3535 bnx2x_stats_pmf_update(bp);
3536 bnx2x_stats_start(bp);
3537}
3538
3539static void bnx2x_stats_restart(struct bnx2x *bp)
3540{
3541 bnx2x_stats_comp(bp);
3542 bnx2x_stats_start(bp);
3543}
3544
3545static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3546{
3547 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3548 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3549 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3550 struct {
3551 u32 lo;
3552 u32 hi;
3553 } diff;
3554
3555 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3556 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3557 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3558 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3559 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3560 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 3561 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
bb2a0f7a 3562 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
de832a55 3563 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3564 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3565 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3566 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3567 UPDATE_STAT64(tx_stat_gt127,
3568 tx_stat_etherstatspkts65octetsto127octets);
3569 UPDATE_STAT64(tx_stat_gt255,
3570 tx_stat_etherstatspkts128octetsto255octets);
3571 UPDATE_STAT64(tx_stat_gt511,
3572 tx_stat_etherstatspkts256octetsto511octets);
3573 UPDATE_STAT64(tx_stat_gt1023,
3574 tx_stat_etherstatspkts512octetsto1023octets);
3575 UPDATE_STAT64(tx_stat_gt1518,
3576 tx_stat_etherstatspkts1024octetsto1522octets);
3577 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3578 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3579 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3580 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3581 UPDATE_STAT64(tx_stat_gterr,
3582 tx_stat_dot3statsinternalmactransmiterrors);
3583 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3584
3585 estats->pause_frames_received_hi =
3586 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3587 estats->pause_frames_received_lo =
3588 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3589
3590 estats->pause_frames_sent_hi =
3591 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3592 estats->pause_frames_sent_lo =
3593 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3594}
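
/* The UPDATE_STAT64/ADD_64 macros used above keep each counter as a
 * {hi, lo} pair of u32s; a carry out of the low word is detected by
 * unsigned wraparound (the sum ends up smaller than the addend).
 * Written out as a function, the accumulation is essentially:
 */
static inline void add_64_sketch(u32 *s_hi, u32 a_hi, u32 *s_lo, u32 a_lo)
{
        *s_lo += a_lo;
        *s_hi += a_hi + ((*s_lo < a_lo) ? 1 : 0); /* carry from low word */
}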
3595
3596static void bnx2x_emac_stats_update(struct bnx2x *bp)
3597{
3598 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3599 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3600 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3601
3602 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3603 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3604 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3605 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3606 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3607 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3608 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3609 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3610 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3611 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3612 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3613 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3614 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3615 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3616 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3617 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3618 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3619 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3620 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3621 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3622 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3623 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3624 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3625 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3626 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3627 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3628 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3629 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3630 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3631 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3632 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3633
3634 estats->pause_frames_received_hi =
3635 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3636 estats->pause_frames_received_lo =
3637 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3638 ADD_64(estats->pause_frames_received_hi,
3639 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3640 estats->pause_frames_received_lo,
3641 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3642
3643 estats->pause_frames_sent_hi =
3644 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3645 estats->pause_frames_sent_lo =
3646 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3647 ADD_64(estats->pause_frames_sent_hi,
3648 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3649 estats->pause_frames_sent_lo,
3650 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3651}
3652
3653static int bnx2x_hw_stats_update(struct bnx2x *bp)
3654{
3655 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3656 struct nig_stats *old = &(bp->port.old_nig_stats);
3657 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3658 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3659 struct {
3660 u32 lo;
3661 u32 hi;
3662 } diff;
de832a55 3663 u32 nig_timer_max;
3664
3665 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3666 bnx2x_bmac_stats_update(bp);
3667
3668 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3669 bnx2x_emac_stats_update(bp);
3670
3671 else { /* unreached */
3672 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3673 return -1;
3674 }
a2fbb9ea 3675
3676 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3677 new->brb_discard - old->brb_discard);
3678 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3679 new->brb_truncate - old->brb_truncate);
a2fbb9ea 3680
3681 UPDATE_STAT64_NIG(egress_mac_pkt0,
3682 etherstatspkts1024octetsto1522octets);
3683 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 3684
bb2a0f7a 3685 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 3686
3687 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3688 sizeof(struct mac_stx));
3689 estats->brb_drop_hi = pstats->brb_drop_hi;
3690 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 3691
bb2a0f7a 3692 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 3693
3694 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3695 if (nig_timer_max != estats->nig_timer_max) {
3696 estats->nig_timer_max = nig_timer_max;
3697 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3698 }
3699
bb2a0f7a 3700 return 0;
3701}
3702
bb2a0f7a 3703static int bnx2x_storm_stats_update(struct bnx2x *bp)
3704{
3705 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
bb2a0f7a 3706 struct tstorm_per_port_stats *tport =
de832a55 3707 &stats->tstorm_common.port_statistics;
3708 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3709 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3710 int i;
3711
3712 memset(&(fstats->total_bytes_received_hi), 0,
3713 sizeof(struct host_func_stats) - 2*sizeof(u32));
3714 estats->error_bytes_received_hi = 0;
3715 estats->error_bytes_received_lo = 0;
3716 estats->etherstatsoverrsizepkts_hi = 0;
3717 estats->etherstatsoverrsizepkts_lo = 0;
3718 estats->no_buff_discard_hi = 0;
3719 estats->no_buff_discard_lo = 0;
a2fbb9ea 3720
3721 for_each_queue(bp, i) {
3722 struct bnx2x_fastpath *fp = &bp->fp[i];
3723 int cl_id = fp->cl_id;
3724 struct tstorm_per_client_stats *tclient =
3725 &stats->tstorm_common.client_statistics[cl_id];
3726 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3727 struct ustorm_per_client_stats *uclient =
3728 &stats->ustorm_common.client_statistics[cl_id];
3729 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3730 struct xstorm_per_client_stats *xclient =
3731 &stats->xstorm_common.client_statistics[cl_id];
3732 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3733 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3734 u32 diff;
3735
3736 /* are storm stats valid? */
3737 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
bb2a0f7a 3738 bp->stats_counter) {
3739 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3740 " xstorm counter (%d) != stats_counter (%d)\n",
3741 i, xclient->stats_counter, bp->stats_counter);
3742 return -1;
3743 }
3744 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
bb2a0f7a 3745 bp->stats_counter) {
3746 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3747 " tstorm counter (%d) != stats_counter (%d)\n",
3748 i, tclient->stats_counter, bp->stats_counter);
3749 return -2;
3750 }
3751 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3752 bp->stats_counter) {
3753 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3754 " ustorm counter (%d) != stats_counter (%d)\n",
3755 i, uclient->stats_counter, bp->stats_counter);
3756 return -4;
3757 }
a2fbb9ea 3758
3759 qstats->total_bytes_received_hi =
3760 qstats->valid_bytes_received_hi =
a2fbb9ea 3761 le32_to_cpu(tclient->total_rcv_bytes.hi);
3762 qstats->total_bytes_received_lo =
3763 qstats->valid_bytes_received_lo =
a2fbb9ea 3764 le32_to_cpu(tclient->total_rcv_bytes.lo);
bb2a0f7a 3765
de832a55 3766 qstats->error_bytes_received_hi =
bb2a0f7a 3767 le32_to_cpu(tclient->rcv_error_bytes.hi);
de832a55 3768 qstats->error_bytes_received_lo =
bb2a0f7a 3769 le32_to_cpu(tclient->rcv_error_bytes.lo);
bb2a0f7a 3770
3771 ADD_64(qstats->total_bytes_received_hi,
3772 qstats->error_bytes_received_hi,
3773 qstats->total_bytes_received_lo,
3774 qstats->error_bytes_received_lo);
3775
3776 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3777 total_unicast_packets_received);
3778 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3779 total_multicast_packets_received);
3780 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3781 total_broadcast_packets_received);
3782 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3783 etherstatsoverrsizepkts);
3784 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3785
3786 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3787 total_unicast_packets_received);
3788 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3789 total_multicast_packets_received);
3790 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3791 total_broadcast_packets_received);
3792 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3793 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3794 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3795
3796 qstats->total_bytes_transmitted_hi =
bb2a0f7a 3797 le32_to_cpu(xclient->total_sent_bytes.hi);
de832a55 3798 qstats->total_bytes_transmitted_lo =
3799 le32_to_cpu(xclient->total_sent_bytes.lo);
3800
3801 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3802 total_unicast_packets_transmitted);
3803 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3804 total_multicast_packets_transmitted);
3805 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3806 total_broadcast_packets_transmitted);
3807
3808 old_tclient->checksum_discard = tclient->checksum_discard;
3809 old_tclient->ttl0_discard = tclient->ttl0_discard;
3810
3811 ADD_64(fstats->total_bytes_received_hi,
3812 qstats->total_bytes_received_hi,
3813 fstats->total_bytes_received_lo,
3814 qstats->total_bytes_received_lo);
3815 ADD_64(fstats->total_bytes_transmitted_hi,
3816 qstats->total_bytes_transmitted_hi,
3817 fstats->total_bytes_transmitted_lo,
3818 qstats->total_bytes_transmitted_lo);
3819 ADD_64(fstats->total_unicast_packets_received_hi,
3820 qstats->total_unicast_packets_received_hi,
3821 fstats->total_unicast_packets_received_lo,
3822 qstats->total_unicast_packets_received_lo);
3823 ADD_64(fstats->total_multicast_packets_received_hi,
3824 qstats->total_multicast_packets_received_hi,
3825 fstats->total_multicast_packets_received_lo,
3826 qstats->total_multicast_packets_received_lo);
3827 ADD_64(fstats->total_broadcast_packets_received_hi,
3828 qstats->total_broadcast_packets_received_hi,
3829 fstats->total_broadcast_packets_received_lo,
3830 qstats->total_broadcast_packets_received_lo);
3831 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3832 qstats->total_unicast_packets_transmitted_hi,
3833 fstats->total_unicast_packets_transmitted_lo,
3834 qstats->total_unicast_packets_transmitted_lo);
3835 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3836 qstats->total_multicast_packets_transmitted_hi,
3837 fstats->total_multicast_packets_transmitted_lo,
3838 qstats->total_multicast_packets_transmitted_lo);
3839 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3840 qstats->total_broadcast_packets_transmitted_hi,
3841 fstats->total_broadcast_packets_transmitted_lo,
3842 qstats->total_broadcast_packets_transmitted_lo);
3843 ADD_64(fstats->valid_bytes_received_hi,
3844 qstats->valid_bytes_received_hi,
3845 fstats->valid_bytes_received_lo,
3846 qstats->valid_bytes_received_lo);
3847
3848 ADD_64(estats->error_bytes_received_hi,
3849 qstats->error_bytes_received_hi,
3850 estats->error_bytes_received_lo,
3851 qstats->error_bytes_received_lo);
3852 ADD_64(estats->etherstatsoverrsizepkts_hi,
3853 qstats->etherstatsoverrsizepkts_hi,
3854 estats->etherstatsoverrsizepkts_lo,
3855 qstats->etherstatsoverrsizepkts_lo);
3856 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3857 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3858 }
3859
3860 ADD_64(fstats->total_bytes_received_hi,
3861 estats->rx_stat_ifhcinbadoctets_hi,
3862 fstats->total_bytes_received_lo,
3863 estats->rx_stat_ifhcinbadoctets_lo);
3864
3865 memcpy(estats, &(fstats->total_bytes_received_hi),
3866 sizeof(struct host_func_stats) - 2*sizeof(u32));
3867
3868 ADD_64(estats->etherstatsoverrsizepkts_hi,
3869 estats->rx_stat_dot3statsframestoolong_hi,
3870 estats->etherstatsoverrsizepkts_lo,
3871 estats->rx_stat_dot3statsframestoolong_lo);
3872 ADD_64(estats->error_bytes_received_hi,
3873 estats->rx_stat_ifhcinbadoctets_hi,
3874 estats->error_bytes_received_lo,
3875 estats->rx_stat_ifhcinbadoctets_lo);
3876
3877 if (bp->port.pmf) {
3878 estats->mac_filter_discard =
3879 le32_to_cpu(tport->mac_filter_discard);
3880 estats->xxoverflow_discard =
3881 le32_to_cpu(tport->xxoverflow_discard);
3882 estats->brb_truncate_discard =
bb2a0f7a 3883 le32_to_cpu(tport->brb_truncate_discard);
3884 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3885 }
3886
3887 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
a2fbb9ea 3888
3889 bp->stats_pending = 0;
3890
3891 return 0;
3892}
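
/* A sketch of the validity handshake checked in the loop above: the
 * driver bumps bp->stats_counter each time it posts a statistics
 * query, and each storm echoes the counter of the query it served.
 * A per-client snapshot is current only when the echoed value is
 * exactly one behind the driver's (helper name is hypothetical):
 */
static inline int storm_stats_current(__le16 echoed, u16 drv_counter)
{
        return (u16)(le16_to_cpu(echoed) + 1) == drv_counter;
}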
3893
bb2a0f7a 3894static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 3895{
bb2a0f7a 3896 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 3897 struct net_device_stats *nstats = &bp->dev->stats;
de832a55 3898 int i;
3899
3900 nstats->rx_packets =
3901 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3902 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3903 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3904
3905 nstats->tx_packets =
3906 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3907 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3908 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3909
de832a55 3910 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
a2fbb9ea 3911
0e39e645 3912 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 3913
3914 nstats->rx_dropped = estats->mac_discard;
3915 for_each_queue(bp, i)
3916 nstats->rx_dropped +=
3917 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3918
3919 nstats->tx_dropped = 0;
3920
3921 nstats->multicast =
de832a55 3922 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
a2fbb9ea 3923
bb2a0f7a 3924 nstats->collisions =
de832a55 3925 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
3926
3927 nstats->rx_length_errors =
3928 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3929 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3930 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3931 bnx2x_hilo(&estats->brb_truncate_hi);
3932 nstats->rx_crc_errors =
3933 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3934 nstats->rx_frame_errors =
3935 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3936 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
3937 nstats->rx_missed_errors = estats->xxoverflow_discard;
3938
3939 nstats->rx_errors = nstats->rx_length_errors +
3940 nstats->rx_over_errors +
3941 nstats->rx_crc_errors +
3942 nstats->rx_frame_errors +
3943 nstats->rx_fifo_errors +
3944 nstats->rx_missed_errors;
a2fbb9ea 3945
bb2a0f7a 3946 nstats->tx_aborted_errors =
3947 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3948 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3949 nstats->tx_carrier_errors =
3950 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
3951 nstats->tx_fifo_errors = 0;
3952 nstats->tx_heartbeat_errors = 0;
3953 nstats->tx_window_errors = 0;
3954
3955 nstats->tx_errors = nstats->tx_aborted_errors +
3956 nstats->tx_carrier_errors +
3957 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3958}
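
/* bnx2x_hilo(), defined earlier in this file, folds such a {hi, lo}
 * pair into the unsigned long that net_device_stats expects; on a
 * 32-bit kernel only the low word can be represented.  Roughly:
 */
static inline unsigned long hilo_sketch(u32 *hi_ref)
{
        u32 lo = *(hi_ref + 1);         /* lo word follows hi in mac_stx */
#if (BITS_PER_LONG == 64)
        return ((u64)(*hi_ref) << 32) + lo;
#else
        return lo;
#endif
}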
3959
3960static void bnx2x_drv_stats_update(struct bnx2x *bp)
3961{
3962 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3963 int i;
3964
3965 estats->driver_xoff = 0;
3966 estats->rx_err_discard_pkt = 0;
3967 estats->rx_skb_alloc_failed = 0;
3968 estats->hw_csum_err = 0;
3969 for_each_queue(bp, i) {
3970 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3971
3972 estats->driver_xoff += qstats->driver_xoff;
3973 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3974 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3975 estats->hw_csum_err += qstats->hw_csum_err;
3976 }
3977}
3978
bb2a0f7a 3979static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 3980{
bb2a0f7a 3981 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3982
3983 if (*stats_comp != DMAE_COMP_VAL)
3984 return;
3985
3986 if (bp->port.pmf)
de832a55 3987 bnx2x_hw_stats_update(bp);
a2fbb9ea 3988
3989 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
3990 BNX2X_ERR("storm stats were not updated for 3 times\n");
3991 bnx2x_panic();
3992 return;
3993 }
3994
3995 bnx2x_net_stats_update(bp);
3996 bnx2x_drv_stats_update(bp);
3997
a2fbb9ea 3998 if (bp->msglevel & NETIF_MSG_TIMER) {
3999 struct tstorm_per_client_stats *old_tclient =
4000 &bp->fp->old_tclient;
4001 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
bb2a0f7a 4002 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 4003 struct net_device_stats *nstats = &bp->dev->stats;
34f80b04 4004 int i;
4005
4006 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4007 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4008 " tx pkt (%lx)\n",
4009 bnx2x_tx_avail(bp->fp),
7a9b2557 4010 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
4011 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4012 " rx pkt (%lx)\n",
4013 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
4014 bp->fp->rx_comp_cons),
4015 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
4016 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4017 "brb truncate %u\n",
4018 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4019 qstats->driver_xoff,
4020 estats->brb_drop_lo, estats->brb_truncate_lo);
a2fbb9ea 4021 printk(KERN_DEBUG "tstats: checksum_discard %u "
de832a55 4022 "packets_too_big_discard %lu no_buff_discard %lu "
4023 "mac_discard %u mac_filter_discard %u "
4024 "xxovrflow_discard %u brb_truncate_discard %u "
4025 "ttl0_discard %u\n",
4781bfad 4026 le32_to_cpu(old_tclient->checksum_discard),
4027 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4028 bnx2x_hilo(&qstats->no_buff_discard_hi),
4029 estats->mac_discard, estats->mac_filter_discard,
4030 estats->xxoverflow_discard, estats->brb_truncate_discard,
4781bfad 4031 le32_to_cpu(old_tclient->ttl0_discard));
4032
4033 for_each_queue(bp, i) {
4034 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4035 bnx2x_fp(bp, i, tx_pkt),
4036 bnx2x_fp(bp, i, rx_pkt),
4037 bnx2x_fp(bp, i, rx_calls));
4038 }
4039 }
4040
4041 bnx2x_hw_stats_post(bp);
4042 bnx2x_storm_stats_post(bp);
4043}
a2fbb9ea 4044
4045static void bnx2x_port_stats_stop(struct bnx2x *bp)
4046{
4047 struct dmae_command *dmae;
4048 u32 opcode;
4049 int loader_idx = PMF_DMAE_C(bp);
4050 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4051
bb2a0f7a 4052 bp->executer_idx = 0;
a2fbb9ea 4053
4054 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4055 DMAE_CMD_C_ENABLE |
4056 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 4057#ifdef __BIG_ENDIAN
bb2a0f7a 4058 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 4059#else
bb2a0f7a 4060 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 4061#endif
4062 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4063 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4064
4065 if (bp->port.port_stx) {
4066
4067 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4068 if (bp->func_stx)
4069 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4070 else
4071 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4072 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4073 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4074 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 4075 dmae->dst_addr_hi = 0;
4076 dmae->len = sizeof(struct host_port_stats) >> 2;
4077 if (bp->func_stx) {
4078 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4079 dmae->comp_addr_hi = 0;
4080 dmae->comp_val = 1;
4081 } else {
4082 dmae->comp_addr_lo =
4083 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4084 dmae->comp_addr_hi =
4085 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4086 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 4087
4088 *stats_comp = 0;
4089 }
4090 }
4091
4092 if (bp->func_stx) {
4093
4094 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4095 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4096 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4097 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4098 dmae->dst_addr_lo = bp->func_stx >> 2;
4099 dmae->dst_addr_hi = 0;
4100 dmae->len = sizeof(struct host_func_stats) >> 2;
4101 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4102 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4103 dmae->comp_val = DMAE_COMP_VAL;
4104
4105 *stats_comp = 0;
a2fbb9ea 4106 }
4107}
4108
4109static void bnx2x_stats_stop(struct bnx2x *bp)
4110{
4111 int update = 0;
4112
4113 bnx2x_stats_comp(bp);
4114
4115 if (bp->port.pmf)
4116 update = (bnx2x_hw_stats_update(bp) == 0);
4117
4118 update |= (bnx2x_storm_stats_update(bp) == 0);
4119
4120 if (update) {
4121 bnx2x_net_stats_update(bp);
a2fbb9ea 4122
4123 if (bp->port.pmf)
4124 bnx2x_port_stats_stop(bp);
4125
4126 bnx2x_hw_stats_post(bp);
4127 bnx2x_stats_comp(bp);
4128 }
4129}
4130
4131static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4132{
4133}
4134
4135static const struct {
4136 void (*action)(struct bnx2x *bp);
4137 enum bnx2x_stats_state next_state;
4138} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4139/* state event */
4140{
4141/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4142/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4143/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4144/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4145},
4146{
4147/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4148/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4149/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4150/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4151}
4152};
4153
4154static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4155{
4156 enum bnx2x_stats_state state = bp->stats_state;
4157
4158 bnx2x_stats_stm[state][event].action(bp);
4159 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4160
4161 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4162 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4163 state, event, bp->stats_state);
4164}
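
/* The table above gives, per (state, event) pair, an action and the
 * next state: e.g. while ENABLED, a STOP event runs bnx2x_stats_stop()
 * and leaves the machine DISABLED, and a later LINK_UP runs
 * bnx2x_stats_start() to re-enable it.  Callers only ever raise
 * events:
 *
 *      bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
 *      ...
 *      bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 */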
4165
4166static void bnx2x_timer(unsigned long data)
4167{
4168 struct bnx2x *bp = (struct bnx2x *) data;
4169
4170 if (!netif_running(bp->dev))
4171 return;
4172
4173 if (atomic_read(&bp->intr_sem) != 0)
f1410647 4174 goto timer_restart;
4175
4176 if (poll) {
4177 struct bnx2x_fastpath *fp = &bp->fp[0];
4178 int rc;
4179
4180 bnx2x_tx_int(fp, 1000);
4181 rc = bnx2x_rx_int(fp, 1000);
4182 }
4183
4184 if (!BP_NOMCP(bp)) {
4185 int func = BP_FUNC(bp);
4186 u32 drv_pulse;
4187 u32 mcp_pulse;
4188
4189 ++bp->fw_drv_pulse_wr_seq;
4190 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4191 /* TBD - add SYSTEM_TIME */
4192 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 4193 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 4194
34f80b04 4195 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4196 MCP_PULSE_SEQ_MASK);
4197 /* The delta between driver pulse and mcp response
4198 * should be 1 (before mcp response) or 0 (after mcp response)
4199 */
4200 if ((drv_pulse != mcp_pulse) &&
4201 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4202 /* someone lost a heartbeat... */
4203 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4204 drv_pulse, mcp_pulse);
4205 }
4206 }
4207
4208 if ((bp->state == BNX2X_STATE_OPEN) ||
4209 (bp->state == BNX2X_STATE_DISABLED))
4210 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 4211
f1410647 4212timer_restart:
4213 mod_timer(&bp->timer, jiffies + bp->current_interval);
4214}
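
/* A sketch of the heartbeat comparison above: both sequence numbers
 * live in the low bits only (DRV_PULSE_SEQ_MASK/MCP_PULSE_SEQ_MASK),
 * so the "one ahead" test must wrap with the mask.  The link to the
 * MCP is healthy when the driver is level with it or exactly one step
 * ahead:
 */
static inline int pulse_in_sync(u32 drv_pulse, u32 mcp_pulse)
{
        return (drv_pulse == mcp_pulse) ||
               (drv_pulse == ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK));
}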
4215
4216/* end of Statistics */
4217
4218/* nic init */
4219
4220/*
4221 * nic init service functions
4222 */
4223
34f80b04 4224static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 4225{
4226 int port = BP_PORT(bp);
4227
4228 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4229 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
35302989 4230 sizeof(struct ustorm_status_block)/4);
4231 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4232 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
35302989 4233 sizeof(struct cstorm_status_block)/4);
4234}
4235
4236static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4237 dma_addr_t mapping, int sb_id)
4238{
4239 int port = BP_PORT(bp);
bb2a0f7a 4240 int func = BP_FUNC(bp);
a2fbb9ea 4241 int index;
34f80b04 4242 u64 section;
4243
4244 /* USTORM */
4245 section = ((u64)mapping) + offsetof(struct host_status_block,
4246 u_status_block);
34f80b04 4247 sb->u_status_block.status_block_id = sb_id;
4248
4249 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4250 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4251 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4252 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4253 U64_HI(section));
4254 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4255 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4256
4257 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4258 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4259 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4260
4261 /* CSTORM */
4262 section = ((u64)mapping) + offsetof(struct host_status_block,
4263 c_status_block);
34f80b04 4264 sb->c_status_block.status_block_id = sb_id;
4265
4266 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4267 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4268 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4269 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4270 U64_HI(section));
4271 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4272 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4273
4274 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4275 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4276 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4277
4278 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4279}
4280
4281static void bnx2x_zero_def_sb(struct bnx2x *bp)
4282{
4283 int func = BP_FUNC(bp);
a2fbb9ea 4284
4285 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4286 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4287 sizeof(struct ustorm_def_status_block)/4);
4288 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4289 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4290 sizeof(struct cstorm_def_status_block)/4);
4291 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4292 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4293 sizeof(struct xstorm_def_status_block)/4);
4294 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4295 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4296 sizeof(struct tstorm_def_status_block)/4);
4297}
4298
4299static void bnx2x_init_def_sb(struct bnx2x *bp,
4300 struct host_def_status_block *def_sb,
34f80b04 4301 dma_addr_t mapping, int sb_id)
a2fbb9ea 4302{
4303 int port = BP_PORT(bp);
4304 int func = BP_FUNC(bp);
4305 int index, val, reg_offset;
4306 u64 section;
4307
4308 /* ATTN */
4309 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4310 atten_status_block);
34f80b04 4311 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 4312
4313 bp->attn_state = 0;
4314
4315 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4316 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4317
34f80b04 4318 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
a2fbb9ea
ET
4319 bp->attn_group[index].sig[0] = REG_RD(bp,
4320 reg_offset + 0x10*index);
4321 bp->attn_group[index].sig[1] = REG_RD(bp,
4322 reg_offset + 0x4 + 0x10*index);
4323 bp->attn_group[index].sig[2] = REG_RD(bp,
4324 reg_offset + 0x8 + 0x10*index);
4325 bp->attn_group[index].sig[3] = REG_RD(bp,
4326 reg_offset + 0xc + 0x10*index);
4327 }
4328
4329 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4330 HC_REG_ATTN_MSG0_ADDR_L);
4331
4332 REG_WR(bp, reg_offset, U64_LO(section));
4333 REG_WR(bp, reg_offset + 4, U64_HI(section));
4334
4335 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4336
4337 val = REG_RD(bp, reg_offset);
34f80b04 4338 val |= sb_id;
4339 REG_WR(bp, reg_offset, val);
4340
4341 /* USTORM */
4342 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4343 u_def_status_block);
34f80b04 4344 def_sb->u_def_status_block.status_block_id = sb_id;
4345
4346 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4347 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4348 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4349 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4350 U64_HI(section));
5c862848 4351 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
34f80b04 4352 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4353
4354 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4355 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4356 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4357
4358 /* CSTORM */
4359 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4360 c_def_status_block);
34f80b04 4361 def_sb->c_def_status_block.status_block_id = sb_id;
4362
4363 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4364 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4365 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4366 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4367 U64_HI(section));
5c862848 4368 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
34f80b04 4369 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4370
4371 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4372 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4373 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4374
4375 /* TSTORM */
4376 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4377 t_def_status_block);
34f80b04 4378 def_sb->t_def_status_block.status_block_id = sb_id;
4379
4380 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4381 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4382 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4383 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4384 U64_HI(section));
5c862848 4385 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
34f80b04 4386 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4387
4388 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4389 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 4390 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4391
4392 /* XSTORM */
4393 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4394 x_def_status_block);
34f80b04 4395 def_sb->x_def_status_block.status_block_id = sb_id;
4396
4397 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4398 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4399 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4400 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4401 U64_HI(section));
5c862848 4402 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
34f80b04 4403 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4404
4405 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4406 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 4407 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 4408
bb2a0f7a 4409 bp->stats_pending = 0;
66e855f3 4410 bp->set_mac_pending = 0;
bb2a0f7a 4411
34f80b04 4412 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4413}
4414
4415static void bnx2x_update_coalesce(struct bnx2x *bp)
4416{
34f80b04 4417 int port = BP_PORT(bp);
4418 int i;
4419
4420 for_each_queue(bp, i) {
34f80b04 4421 int sb_id = bp->fp[i].sb_id;
4422
4423 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4424 REG_WR8(bp, BAR_USTRORM_INTMEM +
34f80b04 4425 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
5c862848 4426 U_SB_ETH_RX_CQ_INDEX),
34f80b04 4427 bp->rx_ticks/12);
a2fbb9ea 4428 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4429 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4430 U_SB_ETH_RX_CQ_INDEX),
4431 bp->rx_ticks ? 0 : 1);
4432
4433 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4434 REG_WR8(bp, BAR_CSTRORM_INTMEM +
34f80b04 4435 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
5c862848 4436 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4437 bp->tx_ticks/12);
a2fbb9ea 4438 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4439 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
5c862848 4440 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4441 bp->tx_ticks ? 0 : 1);
4442 }
4443}
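
/* The /12 above converts the coalescing values (which ethtool hands
 * the driver in microseconds) into host-coalescing timeout units,
 * assuming a 12us HC timer tick; a value of zero instead disables the
 * index via the REG_WR16 of 1.  As a sketch (helper hypothetical):
 */
static inline u8 hc_timeout_units(u16 usecs)
{
        return usecs / 12;      /* assumed 12us HC timer resolution */
}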
4444
4445static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4446 struct bnx2x_fastpath *fp, int last)
4447{
4448 int i;
4449
4450 for (i = 0; i < last; i++) {
4451 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4452 struct sk_buff *skb = rx_buf->skb;
4453
4454 if (skb == NULL) {
4455 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4456 continue;
4457 }
4458
4459 if (fp->tpa_state[i] == BNX2X_TPA_START)
4460 pci_unmap_single(bp->pdev,
4461 pci_unmap_addr(rx_buf, mapping),
356e2385 4462 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4463
4464 dev_kfree_skb(skb);
4465 rx_buf->skb = NULL;
4466 }
4467}
4468
4469static void bnx2x_init_rx_rings(struct bnx2x *bp)
4470{
7a9b2557 4471 int func = BP_FUNC(bp);
4472 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4473 ETH_MAX_AGGREGATION_QUEUES_E1H;
4474 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 4475 int i, j;
a2fbb9ea 4476
87942b46 4477 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4478 DP(NETIF_MSG_IFUP,
4479 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
a2fbb9ea 4480
7a9b2557 4481 if (bp->flags & TPA_ENABLE_FLAG) {
7a9b2557 4482
555f6c78 4483 for_each_rx_queue(bp, j) {
32626230 4484 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 4485
32626230 4486 for (i = 0; i < max_agg_queues; i++) {
4487 fp->tpa_pool[i].skb =
4488 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4489 if (!fp->tpa_pool[i].skb) {
4490 BNX2X_ERR("Failed to allocate TPA "
4491 "skb pool for queue[%d] - "
4492 "disabling TPA on this "
4493 "queue!\n", j);
4494 bnx2x_free_tpa_pool(bp, fp, i);
4495 fp->disable_tpa = 1;
4496 break;
4497 }
4498 pci_unmap_addr_set((struct sw_rx_bd *)
4499 &bp->fp->tpa_pool[i],
4500 mapping, 0);
4501 fp->tpa_state[i] = BNX2X_TPA_STOP;
4502 }
4503 }
4504 }
4505
555f6c78 4506 for_each_rx_queue(bp, j) {
4507 struct bnx2x_fastpath *fp = &bp->fp[j];
4508
4509 fp->rx_bd_cons = 0;
4510 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4511 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4512
4513 /* "next page" elements initialization */
4514 /* SGE ring */
4515 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4516 struct eth_rx_sge *sge;
4517
4518 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4519 sge->addr_hi =
4520 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4521 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4522 sge->addr_lo =
4523 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4524 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4525 }
4526
4527 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 4528
7a9b2557 4529 /* RX BD ring */
4530 for (i = 1; i <= NUM_RX_RINGS; i++) {
4531 struct eth_rx_bd *rx_bd;
4532
4533 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4534 rx_bd->addr_hi =
4535 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 4536 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4537 rx_bd->addr_lo =
4538 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 4539 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4540 }
4541
34f80b04 4542 /* CQ ring */
4543 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4544 struct eth_rx_cqe_next_page *nextpg;
4545
4546 nextpg = (struct eth_rx_cqe_next_page *)
4547 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4548 nextpg->addr_hi =
4549 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 4550 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4551 nextpg->addr_lo =
4552 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 4553 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4554 }
4555
4556 /* Allocate SGEs and initialize the ring elements */
4557 for (i = 0, ring_prod = 0;
4558 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 4559
4560 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4561 BNX2X_ERR("was only able to allocate "
4562 "%d rx sges\n", i);
4563 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4564 /* Cleanup already allocated elements */
4565 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
32626230 4566 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4567 fp->disable_tpa = 1;
4568 ring_prod = 0;
4569 break;
4570 }
4571 ring_prod = NEXT_SGE_IDX(ring_prod);
4572 }
4573 fp->rx_sge_prod = ring_prod;
4574
4575 /* Allocate BDs and initialize BD ring */
66e855f3 4576 fp->rx_comp_cons = 0;
7a9b2557 4577 cqe_ring_prod = ring_prod = 0;
4578 for (i = 0; i < bp->rx_ring_size; i++) {
4579 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4580 BNX2X_ERR("was only able to allocate "
4581 "%d rx skbs on queue[%d]\n", i, j);
4582 fp->eth_q_stats.rx_skb_alloc_failed++;
4583 break;
4584 }
4585 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 4586 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 4587 WARN_ON(ring_prod <= i);
4588 }
4589
4590 fp->rx_bd_prod = ring_prod;
4591 /* must not have more available CQEs than BDs */
4592 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4593 cqe_ring_prod);
4594 fp->rx_pkt = fp->rx_calls = 0;
4595
4596 /* Warning!
4597 * This will generate an interrupt (to the TSTORM),
4598 * so it must only be done after the chip is initialized.
4599 */
4600 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4601 fp->rx_sge_prod);
4602 if (j != 0)
4603 continue;
4604
4605 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4606 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4607 U64_LO(fp->rx_comp_mapping));
4608 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4609 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4610 U64_HI(fp->rx_comp_mapping));
4611 }
4612}
4613
4614static void bnx2x_init_tx_ring(struct bnx2x *bp)
4615{
4616 int i, j;
4617
555f6c78 4618 for_each_tx_queue(bp, j) {
a2fbb9ea
ET
4619 struct bnx2x_fastpath *fp = &bp->fp[j];
4620
4621 for (i = 1; i <= NUM_TX_RINGS; i++) {
4622 struct eth_tx_bd *tx_bd =
4623 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4624
4625 tx_bd->addr_hi =
4626 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
34f80b04 4627 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4628 tx_bd->addr_lo =
4629 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
34f80b04 4630 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4631 }
4632
4633 fp->tx_pkt_prod = 0;
4634 fp->tx_pkt_cons = 0;
4635 fp->tx_bd_prod = 0;
4636 fp->tx_bd_cons = 0;
4637 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4638 fp->tx_pkt = 0;
4639 }
4640}
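
/* Both the RX rings above and this TX ring are chained page to page:
 * the tail element of each BCM_PAGE_SIZE page is reserved as a "next
 * page" pointer, and the (i % NUM_TX_RINGS) arithmetic wraps the last
 * page back onto the first.  A sketch of the index advance this
 * implies (overall ring wrap-around masking omitted, name
 * hypothetical):
 */
static inline u16 next_tx_idx_sketch(u16 x)
{
        /* skip the reserved last slot when crossing a page boundary */
        return (((x + 1) % TX_DESC_CNT) == (TX_DESC_CNT - 1)) ? x + 2 : x + 1;
}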
4641
4642static void bnx2x_init_sp_ring(struct bnx2x *bp)
4643{
34f80b04 4644 int func = BP_FUNC(bp);
a2fbb9ea
ET
4645
4646 spin_lock_init(&bp->spq_lock);
4647
4648 bp->spq_left = MAX_SPQ_PENDING;
4649 bp->spq_prod_idx = 0;
4650 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4651 bp->spq_prod_bd = bp->spq;
4652 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4653
34f80b04 4654 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 4655 U64_LO(bp->spq_mapping));
4656 REG_WR(bp,
4657 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4658 U64_HI(bp->spq_mapping));
4659
34f80b04 4660 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4661 bp->spq_prod_idx);
4662}
4663
4664static void bnx2x_init_context(struct bnx2x *bp)
4665{
4666 int i;
4667
4668 for_each_queue(bp, i) {
4669 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4670 struct bnx2x_fastpath *fp = &bp->fp[i];
de832a55 4671 u8 cl_id = fp->cl_id;
0626b899 4672 u8 sb_id = fp->sb_id;
a2fbb9ea 4673
4674 context->ustorm_st_context.common.sb_index_numbers =
4675 BNX2X_RX_SB_INDEX_NUM;
0626b899 4676 context->ustorm_st_context.common.clientId = cl_id;
34f80b04
EG
4677 context->ustorm_st_context.common.status_block_id = sb_id;
4678 context->ustorm_st_context.common.flags =
4679 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4680 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4681 context->ustorm_st_context.common.statistics_counter_id =
4682 cl_id;
8d9c5f34 4683 context->ustorm_st_context.common.mc_alignment_log_size =
0f00846d 4684 BNX2X_RX_ALIGN_SHIFT;
34f80b04 4685 context->ustorm_st_context.common.bd_buff_size =
437cf2f1 4686 bp->rx_buf_size;
34f80b04 4687 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 4688 U64_HI(fp->rx_desc_mapping);
34f80b04 4689 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 4690 U64_LO(fp->rx_desc_mapping);
4691 if (!fp->disable_tpa) {
4692 context->ustorm_st_context.common.flags |=
4693 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4694 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4695 context->ustorm_st_context.common.sge_buff_size =
4696 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4697 (u32)0xffff);
4698 context->ustorm_st_context.common.sge_page_base_hi =
4699 U64_HI(fp->rx_sge_mapping);
4700 context->ustorm_st_context.common.sge_page_base_lo =
4701 U64_LO(fp->rx_sge_mapping);
4702 }
4703
4704 context->ustorm_ag_context.cdu_usage =
4705 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4706 CDU_REGION_NUMBER_UCM_AG,
4707 ETH_CONNECTION_TYPE);
4708
4709 context->xstorm_st_context.tx_bd_page_base_hi =
4710 U64_HI(fp->tx_desc_mapping);
4711 context->xstorm_st_context.tx_bd_page_base_lo =
4712 U64_LO(fp->tx_desc_mapping);
4713 context->xstorm_st_context.db_data_addr_hi =
4714 U64_HI(fp->tx_prods_mapping);
4715 context->xstorm_st_context.db_data_addr_lo =
4716 U64_LO(fp->tx_prods_mapping);
0626b899 4717 context->xstorm_st_context.statistics_data = (cl_id |
8d9c5f34 4718 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
a2fbb9ea 4719 context->cstorm_st_context.sb_index_number =
5c862848 4720 C_SB_ETH_TX_CQ_INDEX;
34f80b04 4721 context->cstorm_st_context.status_block_id = sb_id;
4722
4723 context->xstorm_ag_context.cdu_reserved =
4724 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4725 CDU_REGION_NUMBER_XCM_AG,
4726 ETH_CONNECTION_TYPE);
4727 }
4728}
4729
4730static void bnx2x_init_ind_table(struct bnx2x *bp)
4731{
26c8fa4d 4732 int func = BP_FUNC(bp);
4733 int i;
4734
555f6c78 4735 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4736 return;
4737
4738 DP(NETIF_MSG_IFUP,
4739 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
a2fbb9ea 4740 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04 4741 REG_WR8(bp, BAR_TSTRORM_INTMEM +
26c8fa4d 4742 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
0626b899 4743 bp->fp->cl_id + (i % bp->num_rx_queues));
4744}
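
/* The fill above spreads the TSTORM_INDIRECTION_TABLE_SIZE RSS slots
 * round-robin over the RX queues, so slot i steers packets to client
 * (base_cl_id + i % num_rx_queues); with four RX queues the table is
 * the repeating pattern cl, cl+1, cl+2, cl+3, cl, ...  The same fill
 * in plain C (names hypothetical):
 */
static void fill_ind_table_sketch(u8 *table, int size, u8 base_cl_id,
                                  int num_rx_queues)
{
        int i;

        for (i = 0; i < size; i++)
                table[i] = base_cl_id + (i % num_rx_queues);
}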
4745
4746static void bnx2x_set_client_config(struct bnx2x *bp)
4747{
49d66772 4748 struct tstorm_eth_client_config tstorm_client = {0};
4749 int port = BP_PORT(bp);
4750 int i;
49d66772 4751
e7799c5f 4752 tstorm_client.mtu = bp->dev->mtu;
49d66772 4753 tstorm_client.config_flags =
4754 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4755 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
49d66772 4756#ifdef BCM_VLAN
0c6671b0 4757 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
49d66772 4758 tstorm_client.config_flags |=
8d9c5f34 4759 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4760 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4761 }
4762#endif
49d66772 4763
4764 if (bp->flags & TPA_ENABLE_FLAG) {
4765 tstorm_client.max_sges_for_packet =
4f40f2cb 4766 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4767 tstorm_client.max_sges_for_packet =
4768 ((tstorm_client.max_sges_for_packet +
4769 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4770 PAGES_PER_SGE_SHIFT;
4771
4772 tstorm_client.config_flags |=
4773 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4774 }
4775
49d66772 4776 for_each_queue(bp, i) {
4777 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4778
49d66772 4779 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4780 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4781 ((u32 *)&tstorm_client)[0]);
4782 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4783 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4784 ((u32 *)&tstorm_client)[1]);
4785 }
4786
4787 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4788 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4789}
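
/* The max_sges_for_packet arithmetic above amounts to two ceiling
 * divisions: round the MTU up to whole SGE pages, then round the page
 * count up to whole SGE entries of PAGES_PER_SGE pages each.  In plain
 * C (helper name hypothetical):
 */
static inline u16 max_sges_for_mtu(u32 mtu)
{
        u32 pages = DIV_ROUND_UP(mtu, SGE_PAGE_SIZE);

        return DIV_ROUND_UP(pages, PAGES_PER_SGE);
}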
4790
4791static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4792{
a2fbb9ea 4793 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4794 int mode = bp->rx_mode;
4795 int mask = (1 << BP_L_ID(bp));
4796 int func = BP_FUNC(bp);
4797 int i;
4798
3196a88a 4799 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
4800
4801 switch (mode) {
4802 case BNX2X_RX_MODE_NONE: /* no Rx */
4803 tstorm_mac_filter.ucast_drop_all = mask;
4804 tstorm_mac_filter.mcast_drop_all = mask;
4805 tstorm_mac_filter.bcast_drop_all = mask;
a2fbb9ea 4806 break;
356e2385 4807
a2fbb9ea 4808 case BNX2X_RX_MODE_NORMAL:
34f80b04 4809 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 4810 break;
356e2385 4811
a2fbb9ea 4812 case BNX2X_RX_MODE_ALLMULTI:
4813 tstorm_mac_filter.mcast_accept_all = mask;
4814 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 4815 break;
356e2385 4816
a2fbb9ea 4817 case BNX2X_RX_MODE_PROMISC:
4818 tstorm_mac_filter.ucast_accept_all = mask;
4819 tstorm_mac_filter.mcast_accept_all = mask;
4820 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 4821 break;
356e2385 4822
a2fbb9ea 4823 default:
4824 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4825 break;
4826 }
4827
4828 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4829 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4830 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4831 ((u32 *)&tstorm_mac_filter)[i]);
4832
34f80b04 4833/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4834 ((u32 *)&tstorm_mac_filter)[i]); */
4835 }
a2fbb9ea 4836
4837 if (mode != BNX2X_RX_MODE_NONE)
4838 bnx2x_set_client_config(bp);
4839}
4840
4841static void bnx2x_init_internal_common(struct bnx2x *bp)
4842{
4843 int i;
4844
4845 if (bp->flags & TPA_ENABLE_FLAG) {
4846 struct tstorm_eth_tpa_exist tpa = {0};
4847
4848 tpa.tpa_exist = 1;
4849
4850 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4851 ((u32 *)&tpa)[0]);
4852 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4853 ((u32 *)&tpa)[1]);
4854 }
4855
4856 /* Zero this manually as its initialization is
4857 currently missing in the initTool */
4858 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4859 REG_WR(bp, BAR_USTRORM_INTMEM +
4860 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4861}
4862
4863static void bnx2x_init_internal_port(struct bnx2x *bp)
4864{
4865 int port = BP_PORT(bp);
4866
4867 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4868 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4869 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4870 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4871}
4872
4873/* Calculates the sum of vn_min_rates.
4874 It's needed for further normalization of the min_rates.
4875 Returns:
4876 sum of vn_min_rates.
4877 or
4878 0 - if all the min_rates are 0.
4879 In the latter case the fairness algorithm should be deactivated.
4880 If not all min_rates are zero then those that are zero will be set to 1.
4881 */
4882static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4883{
4884 int all_zero = 1;
4885 int port = BP_PORT(bp);
4886 int vn;
4887
4888 bp->vn_weight_sum = 0;
4889 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4890 int func = 2*vn + port;
4891 u32 vn_cfg =
4892 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4893 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4894 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4895
4896 /* Skip hidden vns */
4897 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4898 continue;
4899
4900 /* If min rate is zero - set it to the default minimum */
4901 if (!vn_min_rate)
4902 vn_min_rate = DEF_MIN_RATE;
4903 else
4904 all_zero = 0;
4905
4906 bp->vn_weight_sum += vn_min_rate;
4907 }
4908
4909 /* ... only if all min rates are zeros - disable fairness */
4910 if (all_zero)
4911 bp->vn_weight_sum = 0;
4912}
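
/* The sum computed above becomes the denominator for min-rate
 * fairness: each VN is later guaranteed roughly vn_min_rate /
 * vn_weight_sum of the link.  E.g. with configured minimums in the
 * proportion 20 : 30 : 0, the zero is first bumped to DEF_MIN_RATE and
 * the shares come out proportional to 20, 30 and DEF_MIN_RATE over
 * their sum.  A sketch of the share calculation (helper hypothetical):
 */
static inline u32 vn_min_share_percent(u32 vn_min_rate, u32 vn_weight_sum)
{
        if (!vn_weight_sum)             /* all minimums zero: fairness off */
                return 0;
        return vn_min_rate * 100 / vn_weight_sum;
}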
4913
471de716 4914static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 4915{
4916 struct tstorm_eth_function_common_config tstorm_config = {0};
4917 struct stats_indication_flags stats_flags = {0};
4918 int port = BP_PORT(bp);
4919 int func = BP_FUNC(bp);
4920 int i, j;
4921 u32 offset;
471de716 4922 u16 max_agg_size;
4923
4924 if (is_multi(bp)) {
555f6c78 4925 tstorm_config.config_flags = MULTI_FLAGS(bp);
a2fbb9ea
ET
4926 tstorm_config.rss_result_mask = MULTI_MASK;
4927 }
4928 if (IS_E1HMF(bp))
4929 tstorm_config.config_flags |=
4930 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
a2fbb9ea 4931
4932 tstorm_config.leading_client_id = BP_L_ID(bp);
4933
a2fbb9ea 4934 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4935 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
a2fbb9ea
ET
4936 (*(u32 *)&tstorm_config));
4937
c14423fe 4938 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
a2fbb9ea
ET
4939 bnx2x_set_storm_rx_mode(bp);
4940
de832a55
EG
4941 for_each_queue(bp, i) {
4942 u8 cl_id = bp->fp[i].cl_id;
4943
4944 /* reset xstorm per client statistics */
4945 offset = BAR_XSTRORM_INTMEM +
4946 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4947 for (j = 0;
4948 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4949 REG_WR(bp, offset + j*4, 0);
4950
4951 /* reset tstorm per client statistics */
4952 offset = BAR_TSTRORM_INTMEM +
4953 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4954 for (j = 0;
4955 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4956 REG_WR(bp, offset + j*4, 0);
4957
4958 /* reset ustorm per client statistics */
4959 offset = BAR_USTRORM_INTMEM +
4960 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4961 for (j = 0;
4962 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4963 REG_WR(bp, offset + j*4, 0);
4964 }
4965
4966 /* Init statistics related context */
34f80b04 4967 stats_flags.collect_eth = 1;
a2fbb9ea 4968
66e855f3 4969 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4970 ((u32 *)&stats_flags)[0]);
66e855f3 4971 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4972 ((u32 *)&stats_flags)[1]);
4973
66e855f3 4974 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4975 ((u32 *)&stats_flags)[0]);
66e855f3 4976 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4977 ((u32 *)&stats_flags)[1]);
4978
4979 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4980 ((u32 *)&stats_flags)[0]);
4981 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4982 ((u32 *)&stats_flags)[1]);
4983
66e855f3 4984 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4985 ((u32 *)&stats_flags)[0]);
66e855f3 4986 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4987 ((u32 *)&stats_flags)[1]);
4988
4989 REG_WR(bp, BAR_XSTRORM_INTMEM +
4990 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4991 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4992 REG_WR(bp, BAR_XSTRORM_INTMEM +
4993 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4994 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4995
4996 REG_WR(bp, BAR_TSTRORM_INTMEM +
4997 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4998 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4999 REG_WR(bp, BAR_TSTRORM_INTMEM +
5000 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5001 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04 5002
5003 REG_WR(bp, BAR_USTRORM_INTMEM +
5004 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5005 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5006 REG_WR(bp, BAR_USTRORM_INTMEM +
5007 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5008 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5009
5010 if (CHIP_IS_E1H(bp)) {
5011 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5012 IS_E1HMF(bp));
5013 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5014 IS_E1HMF(bp));
5015 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5016 IS_E1HMF(bp));
5017 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5018 IS_E1HMF(bp));
5019
5020 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5021 bp->e1hov);
5022 }
5023
5024 /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
5025 max_agg_size =
5026 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5027 SGE_PAGE_SIZE * PAGES_PER_SGE),
5028 (u32)0xffff);
555f6c78 5029 for_each_rx_queue(bp, i) {
7a9b2557 5030 struct bnx2x_fastpath *fp = &bp->fp[i];
5031
5032 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5033 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5034 U64_LO(fp->rx_comp_mapping));
5035 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5036 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5037 U64_HI(fp->rx_comp_mapping));
5038
7a9b2557 5039 REG_WR16(bp, BAR_USTRORM_INTMEM +
0626b899 5040 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5041 max_agg_size);
5042 }
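/* Worked example for the max_agg_size computation above (editor's
 * note, assuming for illustration 4K SGE pages, PAGES_PER_SGE == 2
 * and MAX_SKB_FRAGS >= 8): the expression becomes
 * min(8 * 4096 * 2, 0xffff) = min(65536, 65535) = 65535, i.e. the
 * clamp to a u16 is the effective aggregation limit there.
 */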
8a1c38d1 5043
5044 /* dropless flow control */
5045 if (CHIP_IS_E1H(bp)) {
5046 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5047
5048 rx_pause.bd_thr_low = 250;
5049 rx_pause.cqe_thr_low = 250;
5050 rx_pause.cos = 1;
5051 rx_pause.sge_thr_low = 0;
5052 rx_pause.bd_thr_high = 350;
5053 rx_pause.cqe_thr_high = 350;
5054 rx_pause.sge_thr_high = 0;
5055
5056 for_each_rx_queue(bp, i) {
5057 struct bnx2x_fastpath *fp = &bp->fp[i];
5058
5059 if (!fp->disable_tpa) {
5060 rx_pause.sge_thr_low = 150;
5061 rx_pause.sge_thr_high = 250;
5062 }
5063
5064
5065 offset = BAR_USTRORM_INTMEM +
5066 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5067 fp->cl_id);
5068 for (j = 0;
5069 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5070 j++)
5071 REG_WR(bp, offset + j*4,
5072 ((u32 *)&rx_pause)[j]);
5073 }
5074 }
5075
5076 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5077
5078 /* Init rate shaping and fairness contexts */
5079 if (IS_E1HMF(bp)) {
5080 int vn;
5081
5082 /* During init there is no active link
5083 Until link is up, set link rate to 10Gbps */
5084 bp->link_vars.line_speed = SPEED_10000;
5085 bnx2x_init_port_minmax(bp);
5086
5087 bnx2x_calc_vn_weight_sum(bp);
5088
5089 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5090 bnx2x_init_vn_minmax(bp, 2*vn + port);
5091
5092 /* Enable rate shaping and fairness */
5093 bp->cmng.flags.cmng_enables =
5094 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5095 if (bp->vn_weight_sum)
5096 bp->cmng.flags.cmng_enables |=
5097 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5098 else
5099 DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
5100 " fairness will be disabled\n");
5101 } else {
5102 /* rate shaping and fairness are disabled */
5103 DP(NETIF_MSG_IFUP,
5104 "single function mode, minmax will be disabled\n");
5105 }
5106
5107
5108 /* Store it to internal memory */
5109 if (bp->port.pmf)
5110 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5111 REG_WR(bp, BAR_XSTRORM_INTMEM +
5112 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5113 ((u32 *)(&bp->cmng))[i]);
5114}
5115
5116static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5117{
5118 switch (load_code) {
5119 case FW_MSG_CODE_DRV_LOAD_COMMON:
5120 bnx2x_init_internal_common(bp);
5121 /* no break */
5122
5123 case FW_MSG_CODE_DRV_LOAD_PORT:
5124 bnx2x_init_internal_port(bp);
5125 /* no break */
5126
5127 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5128 bnx2x_init_internal_func(bp);
5129 break;
5130
5131 default:
5132 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5133 break;
5134 }
5135}
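/* Editor's note on the deliberate fall-through above: the load codes
 * are ordered so that a COMMON load also runs the PORT and FUNCTION
 * stages, and a PORT load also runs the FUNCTION stage.  The same
 * cascade in miniature (illustration only, invented name):
 */
static void bnx2x_init_cascade_sketch(int stage)
{
	switch (stage) {
	case 2:			/* common: falls through all stages */
	case 1:			/* port: falls through to function */
	case 0:			/* function only */
		break;
	}
}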
5136
5137static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5138{
5139 int i;
5140
5141 for_each_queue(bp, i) {
5142 struct bnx2x_fastpath *fp = &bp->fp[i];
5143
34f80b04 5144 fp->bp = bp;
a2fbb9ea 5145 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 5146 fp->index = i;
5147 fp->cl_id = BP_L_ID(bp) + i;
5148 fp->sb_id = fp->cl_id;
5149 DP(NETIF_MSG_IFUP,
5150 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5151 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5c862848 5152 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
0626b899 5153 fp->sb_id);
5c862848 5154 bnx2x_update_fpsb_idx(fp);
5155 }
5156
5157 /* ensure status block indices were read */
5158 rmb();
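/* Editor's note: the status block indices just read live in
 * DMA-coherent memory that the chip writes.  The read barrier makes
 * sure those index loads complete before any later reads that rely
 * on them, i.e. the general pattern is
 *
 *	idx = sb->index;	// device-written memory
 *	rmb();			// order it before subsequent loads
 *	process_up_to(idx);	// hypothetical consumer
 *
 * which matters on weakly ordered CPUs.
 */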
5159
5160
5161 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5162 DEF_SB_ID);
5163 bnx2x_update_dsb_idx(bp);
5164 bnx2x_update_coalesce(bp);
5165 bnx2x_init_rx_rings(bp);
5166 bnx2x_init_tx_ring(bp);
5167 bnx2x_init_sp_ring(bp);
5168 bnx2x_init_context(bp);
471de716 5169 bnx2x_init_internal(bp, load_code);
a2fbb9ea 5170 bnx2x_init_ind_table(bp);
5171 bnx2x_stats_init(bp);
5172
5173 /* At this point, we are ready for interrupts */
5174 atomic_set(&bp->intr_sem, 0);
5175
5176 /* flush all before enabling interrupts */
5177 mb();
5178 mmiowb();
5179
615f8fd9 5180 bnx2x_int_enable(bp);
5181}
5182
5183/* end of nic init */
5184
5185/*
5186 * gzip service functions
5187 */
5188
5189static int bnx2x_gunzip_init(struct bnx2x *bp)
5190{
5191 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5192 &bp->gunzip_mapping);
5193 if (bp->gunzip_buf == NULL)
5194 goto gunzip_nomem1;
5195
5196 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5197 if (bp->strm == NULL)
5198 goto gunzip_nomem2;
5199
5200 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5201 GFP_KERNEL);
5202 if (bp->strm->workspace == NULL)
5203 goto gunzip_nomem3;
5204
5205 return 0;
5206
5207gunzip_nomem3:
5208 kfree(bp->strm);
5209 bp->strm = NULL;
5210
5211gunzip_nomem2:
5212 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5213 bp->gunzip_mapping);
5214 bp->gunzip_buf = NULL;
5215
5216gunzip_nomem1:
5217 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 5218 " decompression\n", bp->dev->name);
5219 return -ENOMEM;
5220}
5221
5222static void bnx2x_gunzip_end(struct bnx2x *bp)
5223{
5224 kfree(bp->strm->workspace);
5225
5226 kfree(bp->strm);
5227 bp->strm = NULL;
5228
5229 if (bp->gunzip_buf) {
5230 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5231 bp->gunzip_mapping);
5232 bp->gunzip_buf = NULL;
5233 }
5234}
5235
5236static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5237{
5238 int n, rc;
5239
5240 /* check gzip header */
5241 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5242 return -EINVAL;
5243
5244 n = 10;
5245
34f80b04 5246#define FNAME 0x8
5247
5248 if (zbuf[3] & FNAME)
5249 while ((zbuf[n++] != 0) && (n < len));
5250
5251 bp->strm->next_in = zbuf + n;
5252 bp->strm->avail_in = len - n;
5253 bp->strm->next_out = bp->gunzip_buf;
5254 bp->strm->avail_out = FW_BUF_SIZE;
5255
5256 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5257 if (rc != Z_OK)
5258 return rc;
5259
5260 rc = zlib_inflate(bp->strm, Z_FINISH);
5261 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5262 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5263 bp->dev->name, bp->strm->msg);
5264
5265 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5266 if (bp->gunzip_outlen & 0x3)
5267 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5268 " gunzip_outlen (%d) not aligned\n",
5269 bp->dev->name, bp->gunzip_outlen);
5270 bp->gunzip_outlen >>= 2;
5271
5272 zlib_inflateEnd(bp->strm);
5273
5274 if (rc == Z_STREAM_END)
5275 return 0;
5276
5277 return rc;
5278}
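/* Editor's sketch of the header handling in bnx2x_gunzip() above
 * (invented name, illustration only): a gzip stream opens with a
 * 10-byte fixed header - magic 0x1f 0x8b, method 8 (deflate) and a
 * flags byte.  When FNAME (bit 3) is set, a NUL-terminated file name
 * follows and must be skipped; what remains is raw deflate data,
 * which is why inflateInit2() is called with a negative window size.
 * Like the driver, this ignores the other optional header fields.
 */
static int bnx2x_gzip_payload_offset_sketch(const u8 *zbuf, int len)
{
	int n = 10;				/* fixed header */

	if (len < n || zbuf[0] != 0x1f || zbuf[1] != 0x8b)
		return -EINVAL;			/* not a gzip stream */
	if (zbuf[3] & 0x8)			/* FNAME set? */
		while ((n < len) && (zbuf[n++] != 0))
			;
	return n;				/* start of deflate data */
}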
5279
5280/* nic load/unload */
5281
5282/*
34f80b04 5283 * General service functions
5284 */
5285
5286/* send a NIG loopback debug packet */
5287static void bnx2x_lb_pckt(struct bnx2x *bp)
5288{
a2fbb9ea 5289 u32 wb_write[3];
5290
5291 /* Ethernet source and destination addresses */
5292 wb_write[0] = 0x55555555;
5293 wb_write[1] = 0x55555555;
34f80b04 5294 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 5295 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5296
5297 /* NON-IP protocol */
5298 wb_write[0] = 0x09000000;
5299 wb_write[1] = 0x55555555;
34f80b04 5300 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 5301 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5302}
5303
5304/* some of the internal memories
5305 * are not directly readable from the driver
5306 * to test them we send debug packets
5307 */
5308static int bnx2x_int_mem_test(struct bnx2x *bp)
5309{
5310 int factor;
5311 int count, i;
5312 u32 val = 0;
5313
ad8d3948 5314 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 5315 factor = 120;
5316 else if (CHIP_REV_IS_EMUL(bp))
5317 factor = 200;
5318 else
a2fbb9ea 5319 factor = 1;
5320
5321 DP(NETIF_MSG_HW, "start part1\n");
5322
5323 /* Disable inputs of parser neighbor blocks */
5324 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5325 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5326 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5327 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5328
5329 /* Write 0 to parser credits for CFC search request */
5330 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5331
5332 /* send Ethernet packet */
5333 bnx2x_lb_pckt(bp);
5334
5335 /* TODO: should the NIG statistics be reset here? */
5336 /* Wait until NIG register shows 1 packet of size 0x10 */
5337 count = 1000 * factor;
5338 while (count) {
34f80b04 5339
5340 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5341 val = *bnx2x_sp(bp, wb_data[0]);
5342 if (val == 0x10)
5343 break;
5344
5345 msleep(10);
5346 count--;
5347 }
5348 if (val != 0x10) {
5349 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5350 return -1;
5351 }
5352
5353 /* Wait until PRS register shows 1 packet */
5354 count = 1000 * factor;
5355 while (count) {
5356 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5357 if (val == 1)
5358 break;
5359
5360 msleep(10);
5361 count--;
5362 }
5363 if (val != 0x1) {
5364 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5365 return -2;
5366 }
5367
5368 /* Reset and init BRB, PRS */
34f80b04 5369 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 5370 msleep(50);
34f80b04 5371 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5372 msleep(50);
5373 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5374 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5375
5376 DP(NETIF_MSG_HW, "part2\n");
5377
5378 /* Disable inputs of parser neighbor blocks */
5379 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5380 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5381 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5382 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5383
5384 /* Write 0 to parser credits for CFC search request */
5385 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5386
5387 /* send 10 Ethernet packets */
5388 for (i = 0; i < 10; i++)
5389 bnx2x_lb_pckt(bp);
5390
5391 /* Wait until NIG register shows 10 + 1
5392 packets of size 11*0x10 = 0xb0 */
5393 count = 1000 * factor;
5394 while (count) {
34f80b04 5395
5396 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5397 val = *bnx2x_sp(bp, wb_data[0]);
5398 if (val == 0xb0)
5399 break;
5400
5401 msleep(10);
5402 count--;
5403 }
5404 if (val != 0xb0) {
5405 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5406 return -3;
5407 }
5408
5409 /* Wait until PRS register shows 2 packets */
5410 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5411 if (val != 2)
5412 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5413
5414 /* Write 1 to parser credits for CFC search request */
5415 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5416
5417 /* Wait until PRS register shows 3 packets */
5418 msleep(10 * factor);
5419 /* the PRS packet count should have reached 3 by now */
5420 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5421 if (val != 3)
5422 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5423
5424 /* clear NIG EOP FIFO */
5425 for (i = 0; i < 11; i++)
5426 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5427 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5428 if (val != 1) {
5429 BNX2X_ERR("clear of NIG failed\n");
5430 return -4;
5431 }
5432
5433 /* Reset and init BRB, PRS, NIG */
5434 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5435 msleep(50);
5436 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5437 msleep(50);
5438 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5439 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5440#ifndef BCM_ISCSI
5441 /* set NIC mode */
5442 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5443#endif
5444
5445 /* Enable inputs of parser neighbor blocks */
5446 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5447 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5448 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5449 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5450
5451 DP(NETIF_MSG_HW, "done\n");
5452
5453 return 0; /* OK */
5454}
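/* Editor's sketch (invented name): the self test above repeats one
 * idiom - poll a register until it reaches an expected value or a
 * count (scaled by 'factor' on FPGA/emulation) runs out.  Factored
 * out, the pattern would look like:
 */
static int bnx2x_poll_reg_sketch(struct bnx2x *bp, u32 reg, u32 expected,
				 int count)
{
	u32 val = 0;

	while (count--) {
		val = REG_RD(bp, reg);
		if (val == expected)
			return 0;
		msleep(10);
	}
	BNX2X_ERR("poll timeout val = 0x%x\n", val);
	return -EBUSY;
}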
5455
5456static void enable_blocks_attention(struct bnx2x *bp)
5457{
5458 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5459 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5460 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5461 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5462 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5463 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5464 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5465 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5466 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5467/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5468/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5469 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5470 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5471 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5472/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5473/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5474 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5475 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5476 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5477 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5478/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5479/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5480 if (CHIP_REV_IS_FPGA(bp))
5481 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5482 else
5483 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5484 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5485 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5486 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5487/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5488/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5489 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5490 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5491/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5492 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
5493}
5494
34f80b04 5495
5496static void bnx2x_reset_common(struct bnx2x *bp)
5497{
5498 /* reset_common */
5499 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5500 0xd3ffff7f);
5501 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5502}
5503
34f80b04 5504static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5505{
a2fbb9ea 5506 u32 val, i;
a2fbb9ea 5507
34f80b04 5508 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5509
81f75bbf 5510 bnx2x_reset_common(bp);
5511 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5512 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5513
5514 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5515 if (CHIP_IS_E1H(bp))
5516 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5517
5518 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5519 msleep(30);
5520 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5521
5522 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5523 if (CHIP_IS_E1(bp)) {
5524 /* enable HW interrupt from PXP on USDM overflow
5525 bit 16 on INT_MASK_0 */
5526 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5527 }
a2fbb9ea 5528
5529 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5530 bnx2x_init_pxp(bp);
5531
5532#ifdef __BIG_ENDIAN
5533 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5534 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5535 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5536 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5537 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5538 /* make sure this value is 0 */
5539 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5540
5541/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5542 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5543 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5544 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5545 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5546#endif
5547
34f80b04 5548 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 5549#ifdef BCM_ISCSI
5550 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5551 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5552 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5553#endif
5554
5555 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5556 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5557
5558 /* let the HW do its magic ... */
5559 msleep(100);
5560 /* finish PXP init */
5561 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5562 if (val != 1) {
5563 BNX2X_ERR("PXP2 CFG failed\n");
5564 return -EBUSY;
5565 }
5566 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5567 if (val != 1) {
5568 BNX2X_ERR("PXP2 RD_INIT failed\n");
5569 return -EBUSY;
5570 }
a2fbb9ea 5571
5572 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5573 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5574
34f80b04 5575 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
a2fbb9ea 5576
5577 /* clean the DMAE memory */
5578 bp->dmae_ready = 1;
5579 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5580
5581 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5582 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5583 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5584 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
a2fbb9ea 5585
5586 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5587 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5588 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5589 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5590
5591 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5592 /* soft reset pulse */
5593 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5594 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5595
5596#ifdef BCM_ISCSI
34f80b04 5597 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
a2fbb9ea 5598#endif
a2fbb9ea 5599
5600 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5601 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5602 if (!CHIP_REV_IS_SLOW(bp)) {
5603 /* enable hw interrupt from doorbell Q */
5604 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5605 }
a2fbb9ea 5606
34f80b04 5607 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
34f80b04 5608 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
26c8fa4d 5609 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5610 /* set NIC mode */
5611 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5612 if (CHIP_IS_E1H(bp))
5613 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 5614
5615 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5616 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5617 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5618 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
a2fbb9ea 5619
5620 if (CHIP_IS_E1H(bp)) {
5621 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5622 STORM_INTMEM_SIZE_E1H/2);
5623 bnx2x_init_fill(bp,
5624 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5625 0, STORM_INTMEM_SIZE_E1H/2);
5626 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5627 STORM_INTMEM_SIZE_E1H/2);
5628 bnx2x_init_fill(bp,
5629 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5630 0, STORM_INTMEM_SIZE_E1H/2);
5631 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5632 STORM_INTMEM_SIZE_E1H/2);
5633 bnx2x_init_fill(bp,
5634 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5635 0, STORM_INTMEM_SIZE_E1H/2);
5636 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5637 STORM_INTMEM_SIZE_E1H/2);
5638 bnx2x_init_fill(bp,
5639 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5640 0, STORM_INTMEM_SIZE_E1H/2);
5641 } else { /* E1 */
5642 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5643 STORM_INTMEM_SIZE_E1);
5644 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5645 STORM_INTMEM_SIZE_E1);
5646 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5647 STORM_INTMEM_SIZE_E1);
5648 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5649 STORM_INTMEM_SIZE_E1);
34f80b04 5650 }
a2fbb9ea 5651
5652 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5653 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5654 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5655 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
a2fbb9ea 5656
5657 /* sync semi rtc */
5658 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5659 0x80000000);
5660 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5661 0x80000000);
a2fbb9ea 5662
5663 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5664 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5665 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
a2fbb9ea 5666
5667 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5668 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5669 REG_WR(bp, i, 0xc0cac01a);
5670 /* TODO: replace with something meaningful */
5671 }
8d9c5f34 5672 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
34f80b04 5673 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5674
5675 if (sizeof(union cdu_context) != 1024)
5676 /* we currently assume that a context is 1024 bytes */
5677 printk(KERN_ALERT PFX "please adjust the size of"
5678 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 5679
5680 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5681 val = (4 << 24) + (0 << 12) + 1024;
5682 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5683 if (CHIP_IS_E1(bp)) {
5684 /* !!! fix pxp client credit until excel update */
5685 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5686 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5687 }
a2fbb9ea 5688
5689 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5690 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5691 /* enable context validation interrupt from CFC */
5692 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5693
5694 /* set the thresholds to prevent CFC/CDU race */
5695 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 5696
5697 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5698 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
a2fbb9ea 5699
5700 /* PXPCS COMMON comes here */
5701 /* Reset PCIE errors for debug */
5702 REG_WR(bp, 0x2814, 0xffffffff);
5703 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5704
5705 /* EMAC0 COMMON comes here */
5706 /* EMAC1 COMMON comes here */
5707 /* DBU COMMON comes here */
5708 /* DBG COMMON comes here */
5709
5710 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5711 if (CHIP_IS_E1H(bp)) {
5712 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5713 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5714 }
5715
5716 if (CHIP_REV_IS_SLOW(bp))
5717 msleep(200);
5718
5719 /* finish CFC init */
5720 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5721 if (val != 1) {
5722 BNX2X_ERR("CFC LL_INIT failed\n");
5723 return -EBUSY;
5724 }
5725 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5726 if (val != 1) {
5727 BNX2X_ERR("CFC AC_INIT failed\n");
5728 return -EBUSY;
5729 }
5730 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5731 if (val != 1) {
5732 BNX2X_ERR("CFC CAM_INIT failed\n");
5733 return -EBUSY;
5734 }
5735 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5736
5737 /* read NIG statistic
5738 to see if this is the first load since power-up */
5739 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5740 val = *bnx2x_sp(bp, wb_data[0]);
5741
5742 /* do internal memory self test */
5743 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5744 BNX2X_ERR("internal mem self test failed\n");
5745 return -EBUSY;
5746 }
5747
35b19ba5 5748 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5749 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5750 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5751 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5752 bp->port.need_hw_lock = 1;
5753 break;
5754
35b19ba5 5755 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5756 /* Fan failure is indicated by SPIO 5 */
5757 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5758 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5759
5760 /* set to active low mode */
5761 val = REG_RD(bp, MISC_REG_SPIO_INT);
5762 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
f1410647 5763 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
34f80b04 5764 REG_WR(bp, MISC_REG_SPIO_INT, val);
f1410647 5765
5766 /* enable interrupt to signal the IGU */
5767 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5768 val |= (1 << MISC_REGISTERS_SPIO_5);
5769 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5770 break;
f1410647 5771
5772 default:
5773 break;
5774 }
f1410647 5775
5776 /* clear PXP2 attentions */
5777 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5778
34f80b04 5779 enable_blocks_attention(bp);
a2fbb9ea 5780
5781 if (!BP_NOMCP(bp)) {
5782 bnx2x_acquire_phy_lock(bp);
5783 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5784 bnx2x_release_phy_lock(bp);
5785 } else
5786 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5787
5788 return 0;
5789}
a2fbb9ea 5790
5791static int bnx2x_init_port(struct bnx2x *bp)
5792{
5793 int port = BP_PORT(bp);
1c06328c 5794 u32 low, high;
34f80b04 5795 u32 val;
a2fbb9ea 5796
5797 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5798
5799 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5800
5801 /* Port PXP comes here */
5802 /* Port PXP2 comes here */
5803#ifdef BCM_ISCSI
5804 /* Port0 1
5805 * Port1 385 */
5806 i++;
5807 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5808 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5809 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5810 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5811
5812 /* Port0 2
5813 * Port1 386 */
5814 i++;
5815 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5816 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5817 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5818 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5819
5820 /* Port0 3
5821 * Port1 387 */
5822 i++;
5823 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5824 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5825 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5826 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5827#endif
34f80b04 5828 /* Port CMs come here */
5829 bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
5830 (port ? XCM_PORT1_END : XCM_PORT0_END));
5831
5832 /* Port QM comes here */
5833#ifdef BCM_ISCSI
5834 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5835 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5836
5837 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5838 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5839#endif
5840 /* Port DQ comes here */
5841
5842 bnx2x_init_block(bp, (port ? BRB1_PORT1_START : BRB1_PORT0_START),
5843 (port ? BRB1_PORT1_END : BRB1_PORT0_END));
5844 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5845 /* no pause for emulation and FPGA */
5846 low = 0;
5847 high = 513;
5848 } else {
5849 if (IS_E1HMF(bp))
5850 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5851 else if (bp->dev->mtu > 4096) {
5852 if (bp->flags & ONE_PORT_FLAG)
5853 low = 160;
5854 else {
5855 val = bp->dev->mtu;
5856 /* (24*1024 + val*4)/256 */
5857 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5858 }
5859 } else
5860 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5861 high = low + 56; /* 14*1024/256 */
5862 }
5863 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5864 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5865
5866
ad8d3948 5867 /* Port PRS comes here */
5868 /* Port TSDM comes here */
5869 /* Port CSDM comes here */
5870 /* Port USDM comes here */
5871 /* Port XSDM comes here */
356e2385 5872
5873 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5874 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5875 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5876 port ? USEM_PORT1_END : USEM_PORT0_END);
5877 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5878 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5879 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5880 port ? XSEM_PORT1_END : XSEM_PORT0_END);
356e2385 5881
a2fbb9ea 5882 /* Port UPB comes here */
5883 /* Port XPB comes here */
5884
5885 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5886 port ? PBF_PORT1_END : PBF_PORT0_END);
5887
5888 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 5889 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5890
5891 /* update threshold */
34f80b04 5892 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 5893 /* update init credit */
34f80b04 5894 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5895
5896 /* probe changes */
34f80b04 5897 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 5898 msleep(5);
34f80b04 5899 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5900
5901#ifdef BCM_ISCSI
5902 /* tell the searcher where the T2 table is */
5903 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5904
5905 wb_write[0] = U64_LO(bp->t2_mapping);
5906 wb_write[1] = U64_HI(bp->t2_mapping);
5907 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5908 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5909 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5910 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5911
5912 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5913 /* Port SRCH comes here */
5914#endif
5915 /* Port CDU comes here */
5916 /* Port CFC comes here */
5917
5918 if (CHIP_IS_E1(bp)) {
5919 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5920 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5921 }
5922 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5923 port ? HC_PORT1_END : HC_PORT0_END);
5924
5925 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
a2fbb9ea 5926 MISC_AEU_PORT0_START,
5927 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5928 /* init aeu_mask_attn_func_0/1:
5929 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5930 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5931 * bits 4-7 are used for "per vn group attention" */
5932 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5933 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5934
5935 /* Port PXPCS comes here */
5936 /* Port EMAC0 comes here */
5937 /* Port EMAC1 comes here */
5938 /* Port DBU comes here */
5939 /* Port DBG comes here */
356e2385 5940
5941 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5942 port ? NIG_PORT1_END : NIG_PORT0_END);
5943
5944 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5945
5946 if (CHIP_IS_E1H(bp)) {
5947 /* 0x2 disable e1hov, 0x1 enable */
5948 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5949 (IS_E1HMF(bp) ? 0x1 : 0x2));
5950
5951 /* support pause requests from USDM, TSDM and BRB */
5952 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
5953
5954 {
5955 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5956 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5957 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5958 }
5959 }
5960
5961 /* Port MCP comes here */
5962 /* Port DMAE comes here */
5963
35b19ba5 5964 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5965 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5966 {
5967 u32 swap_val, swap_override, aeu_gpio_mask, offset;
5968
5969 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
5970 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
5971
5972 /* The GPIO should be swapped if the swap register is
5973 set and active */
5974 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5975 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5976
5977 /* Select function upon port-swap configuration */
5978 if (port == 0) {
5979 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
5980 aeu_gpio_mask = (swap_val && swap_override) ?
5981 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
5982 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
5983 } else {
5984 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
5985 aeu_gpio_mask = (swap_val && swap_override) ?
5986 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
5987 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
5988 }
5989 val = REG_RD(bp, offset);
5990 /* add GPIO3 to group */
5991 val |= aeu_gpio_mask;
5992 REG_WR(bp, offset, val);
5993 }
5994 break;
5995
35b19ba5 5996 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5997 /* add SPIO 5 to group 0 */
5998 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5999 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6000 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
6001 break;
6002
6003 default:
6004 break;
6005 }
6006
c18487ee 6007 bnx2x__link_reset(bp);
a2fbb9ea 6008
6009 return 0;
6010}
6011
6012#define ILT_PER_FUNC (768/2)
6013#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6014/* the phys address is shifted right 12 bits and has a 1=valid
6015 bit added as the 53rd bit
6016 then since this is a wide register(TM)
6017 we split it into two 32 bit writes
6018 */
6019#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6020#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6021#define PXP_ONE_ILT(x) (((x) << 10) | x)
6022#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
6023
6024#define CNIC_ILT_LINES 0
6025
6026static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6027{
6028 int reg;
6029
6030 if (CHIP_IS_E1H(bp))
6031 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6032 else /* E1 */
6033 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6034
6035 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6036}
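/* Worked example for the ONCHIP_ADDR macros above (editor's note):
 * for a DMA address of 0x1234567000, ONCHIP_ADDR1() yields
 * (0x1234567000 >> 12) & 0xffffffff = 0x01234567, while
 * ONCHIP_ADDR2() yields (1 << 20) | (0x1234567000 >> 44) =
 * 0x00100000, i.e. the valid bit with no high address bits set.
 * bnx2x_ilt_wr() then emits the two halves as one wide-register
 * write describing a single page-aligned, valid ILT entry.
 */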
6037
6038static int bnx2x_init_func(struct bnx2x *bp)
6039{
6040 int port = BP_PORT(bp);
6041 int func = BP_FUNC(bp);
8badd27a 6042 u32 addr, val;
6043 int i;
6044
6045 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6046
6047 /* set MSI reconfigure capability */
6048 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6049 val = REG_RD(bp, addr);
6050 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6051 REG_WR(bp, addr, val);
6052
6053 i = FUNC_ILT_BASE(func);
6054
6055 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6056 if (CHIP_IS_E1H(bp)) {
6057 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6058 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6059 } else /* E1 */
6060 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6061 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6062
6063
6064 if (CHIP_IS_E1H(bp)) {
6065 for (i = 0; i < 9; i++)
6066 bnx2x_init_block(bp,
6067 cm_start[func][i], cm_end[func][i]);
6068
6069 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6070 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6071 }
6072
6073 /* HC init per function */
6074 if (CHIP_IS_E1H(bp)) {
6075 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6076
6077 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6078 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6079 }
6080 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
6081
c14423fe 6082 /* Reset PCIE errors for debug */
6083 REG_WR(bp, 0x2114, 0xffffffff);
6084 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 6085
6086 return 0;
6087}
6088
6089static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6090{
6091 int i, rc = 0;
a2fbb9ea 6092
6093 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6094 BP_FUNC(bp), load_code);
a2fbb9ea 6095
6096 bp->dmae_ready = 0;
6097 mutex_init(&bp->dmae_mutex);
6098 bnx2x_gunzip_init(bp);
a2fbb9ea 6099
6100 switch (load_code) {
6101 case FW_MSG_CODE_DRV_LOAD_COMMON:
6102 rc = bnx2x_init_common(bp);
6103 if (rc)
6104 goto init_hw_err;
6105 /* no break */
6106
6107 case FW_MSG_CODE_DRV_LOAD_PORT:
6108 bp->dmae_ready = 1;
6109 rc = bnx2x_init_port(bp);
6110 if (rc)
6111 goto init_hw_err;
6112 /* no break */
6113
6114 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6115 bp->dmae_ready = 1;
6116 rc = bnx2x_init_func(bp);
6117 if (rc)
6118 goto init_hw_err;
6119 break;
6120
6121 default:
6122 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6123 break;
6124 }
6125
6126 if (!BP_NOMCP(bp)) {
6127 int func = BP_FUNC(bp);
6128
6129 bp->fw_drv_pulse_wr_seq =
34f80b04 6130 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 6131 DRV_PULSE_SEQ_MASK);
6132 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6133 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
6134 bp->fw_drv_pulse_wr_seq, bp->func_stx);
6135 } else
6136 bp->func_stx = 0;
a2fbb9ea 6137
6138 /* this needs to be done before gunzip end */
6139 bnx2x_zero_def_sb(bp);
6140 for_each_queue(bp, i)
6141 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6142
6143init_hw_err:
6144 bnx2x_gunzip_end(bp);
6145
6146 return rc;
6147}
6148
c14423fe 6149/* send the MCP a request, block until there is a reply */
6150static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6151{
34f80b04 6152 int func = BP_FUNC(bp);
6153 u32 seq = ++bp->fw_seq;
6154 u32 rc = 0;
6155 u32 cnt = 1;
6156 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
a2fbb9ea 6157
34f80b04 6158 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
f1410647 6159 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea 6160
6161 do {
6162 /* let the FW do its magic ... */
6163 msleep(delay);
a2fbb9ea 6164
19680c48 6165 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
a2fbb9ea 6166
6167 /* Give the FW up to 2 seconds (200*10ms) */
6168 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6169
6170 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6171 cnt*delay, rc, seq);
6172
6173 /* is this a reply to our command? */
6174 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6175 rc &= FW_MSG_CODE_MASK;
f1410647 6176
6177 } else {
6178 /* FW BUG! */
6179 BNX2X_ERR("FW failed to respond!\n");
6180 bnx2x_fw_dump(bp);
6181 rc = 0;
6182 }
f1410647 6183
6184 return rc;
6185}
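/* Editor's note on the mailbox protocol above: each request carries
 * a sequence number in its low bits (command | seq), and the
 * firmware echoes that sequence in fw_mb_header when its reply is
 * valid.  Matching seq against FW_MSG_SEQ_NUMBER_MASK is what lets
 * the driver distinguish a fresh reply from a stale one left over
 * from the previous command.
 */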
6186
6187static void bnx2x_free_mem(struct bnx2x *bp)
6188{
6189
6190#define BNX2X_PCI_FREE(x, y, size) \
6191 do { \
6192 if (x) { \
6193 pci_free_consistent(bp->pdev, size, x, y); \
6194 x = NULL; \
6195 y = 0; \
6196 } \
6197 } while (0)
6198
6199#define BNX2X_FREE(x) \
6200 do { \
6201 if (x) { \
6202 vfree(x); \
6203 x = NULL; \
6204 } \
6205 } while (0)
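/* Editor's note: the do { ... } while (0) wrapper makes each macro
 * above expand to a single statement, so
 *
 *	if (cond)
 *		BNX2X_PCI_FREE(x, y, size);
 *	else
 *		...
 *
 * parses as intended; with a bare { } block the semicolon after the
 * macro call would terminate the if, and the else would not compile.
 */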
6206
6207 int i;
6208
6209 /* fastpath */
555f6c78 6210 /* Common */
6211 for_each_queue(bp, i) {
6212
555f6c78 6213 /* status blocks */
6214 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6215 bnx2x_fp(bp, i, status_blk_mapping),
6216 sizeof(struct host_status_block) +
6217 sizeof(struct eth_tx_db_data));
6218 }
6219 /* Rx */
6220 for_each_rx_queue(bp, i) {
a2fbb9ea 6221
555f6c78 6222 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6223 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6224 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6225 bnx2x_fp(bp, i, rx_desc_mapping),
6226 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6227
6228 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6229 bnx2x_fp(bp, i, rx_comp_mapping),
6230 sizeof(struct eth_fast_path_rx_cqe) *
6231 NUM_RCQ_BD);
a2fbb9ea 6232
7a9b2557 6233 /* SGE ring */
32626230 6234 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6235 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6236 bnx2x_fp(bp, i, rx_sge_mapping),
6237 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6238 }
6239 /* Tx */
6240 for_each_tx_queue(bp, i) {
6241
6242 /* fastpath tx rings: tx_buf tx_desc */
6243 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6244 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6245 bnx2x_fp(bp, i, tx_desc_mapping),
6246 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6247 }
6248 /* end of fastpath */
6249
6250 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 6251 sizeof(struct host_def_status_block));
6252
6253 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 6254 sizeof(struct bnx2x_slowpath));
6255
6256#ifdef BCM_ISCSI
6257 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6258 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6259 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6260 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6261#endif
7a9b2557 6262 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6263
6264#undef BNX2X_PCI_FREE
6265#undef BNX2X_FREE
6266}
6267
6268static int bnx2x_alloc_mem(struct bnx2x *bp)
6269{
6270
6271#define BNX2X_PCI_ALLOC(x, y, size) \
6272 do { \
6273 x = pci_alloc_consistent(bp->pdev, size, y); \
6274 if (x == NULL) \
6275 goto alloc_mem_err; \
6276 memset(x, 0, size); \
6277 } while (0)
6278
6279#define BNX2X_ALLOC(x, size) \
6280 do { \
6281 x = vmalloc(size); \
6282 if (x == NULL) \
6283 goto alloc_mem_err; \
6284 memset(x, 0, size); \
6285 } while (0)
6286
6287 int i;
6288
6289 /* fastpath */
555f6c78 6290 /* Common */
6291 for_each_queue(bp, i) {
6292 bnx2x_fp(bp, i, bp) = bp;
6293
555f6c78 6294 /* status blocks */
6295 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6296 &bnx2x_fp(bp, i, status_blk_mapping),
6297 sizeof(struct host_status_block) +
6298 sizeof(struct eth_tx_db_data));
6299 }
6300 /* Rx */
6301 for_each_rx_queue(bp, i) {
a2fbb9ea 6302
555f6c78 6303 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6304 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6305 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6306 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6307 &bnx2x_fp(bp, i, rx_desc_mapping),
6308 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6309
6310 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6311 &bnx2x_fp(bp, i, rx_comp_mapping),
6312 sizeof(struct eth_fast_path_rx_cqe) *
6313 NUM_RCQ_BD);
6314
6315 /* SGE ring */
6316 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6317 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6318 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6319 &bnx2x_fp(bp, i, rx_sge_mapping),
6320 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea 6321 }
6322 /* Tx */
6323 for_each_tx_queue(bp, i) {
6324
6325 bnx2x_fp(bp, i, hw_tx_prods) =
6326 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6327
6328 bnx2x_fp(bp, i, tx_prods_mapping) =
6329 bnx2x_fp(bp, i, status_blk_mapping) +
6330 sizeof(struct host_status_block);
6331
6332 /* fastpath tx rings: tx_buf tx_desc */
6333 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6334 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6335 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6336 &bnx2x_fp(bp, i, tx_desc_mapping),
6337 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6338 }
6339 /* end of fastpath */
6340
6341 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6342 sizeof(struct host_def_status_block));
6343
6344 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6345 sizeof(struct bnx2x_slowpath));
6346
6347#ifdef BCM_ISCSI
6348 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6349
6350 /* Initialize T1 */
6351 for (i = 0; i < 64*1024; i += 64) {
6352 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6353 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6354 }
6355
6356 /* allocate searcher T2 table
6357 we allocate 1/4 of the allocation count for T2
6358 (which is not entered into the ILT) */
6359 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6360
6361 /* Initialize T2 */
6362 for (i = 0; i < 16*1024; i += 64)
6363 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6364
c14423fe 6365 /* now fixup the last line in the block to point to the next block */
6366 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6367
6368 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6369 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6370
6371 /* QM queues (128*MAX_CONN) */
6372 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6373#endif
6374
6375 /* Slow path ring */
6376 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6377
6378 return 0;
6379
6380alloc_mem_err:
6381 bnx2x_free_mem(bp);
6382 return -ENOMEM;
6383
6384#undef BNX2X_PCI_ALLOC
6385#undef BNX2X_ALLOC
6386}
6387
6388static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6389{
6390 int i;
6391
555f6c78 6392 for_each_tx_queue(bp, i) {
6393 struct bnx2x_fastpath *fp = &bp->fp[i];
6394
6395 u16 bd_cons = fp->tx_bd_cons;
6396 u16 sw_prod = fp->tx_pkt_prod;
6397 u16 sw_cons = fp->tx_pkt_cons;
6398
6399 while (sw_cons != sw_prod) {
6400 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6401 sw_cons++;
6402 }
6403 }
6404}
6405
6406static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6407{
6408 int i, j;
6409
555f6c78 6410 for_each_rx_queue(bp, j) {
6411 struct bnx2x_fastpath *fp = &bp->fp[j];
6412
6413 for (i = 0; i < NUM_RX_BD; i++) {
6414 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6415 struct sk_buff *skb = rx_buf->skb;
6416
6417 if (skb == NULL)
6418 continue;
6419
6420 pci_unmap_single(bp->pdev,
6421 pci_unmap_addr(rx_buf, mapping),
356e2385 6422 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6423
6424 rx_buf->skb = NULL;
6425 dev_kfree_skb(skb);
6426 }
7a9b2557 6427 if (!fp->disable_tpa)
6428 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6429 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 6430 ETH_MAX_AGGREGATION_QUEUES_E1H);
6431 }
6432}
6433
6434static void bnx2x_free_skbs(struct bnx2x *bp)
6435{
6436 bnx2x_free_tx_skbs(bp);
6437 bnx2x_free_rx_skbs(bp);
6438}
6439
6440static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6441{
34f80b04 6442 int i, offset = 1;
6443
6444 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 6445 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6446 bp->msix_table[0].vector);
6447
6448 for_each_queue(bp, i) {
c14423fe 6449 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 6450 "state %x\n", i, bp->msix_table[i + offset].vector,
6451 bnx2x_fp(bp, i, state));
6452
34f80b04 6453 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 6454 }
6455}
6456
6457static void bnx2x_free_irq(struct bnx2x *bp)
6458{
a2fbb9ea 6459 if (bp->flags & USING_MSIX_FLAG) {
6460 bnx2x_free_msix_irqs(bp);
6461 pci_disable_msix(bp->pdev);
6462 bp->flags &= ~USING_MSIX_FLAG;
6463
6464 } else if (bp->flags & USING_MSI_FLAG) {
6465 free_irq(bp->pdev->irq, bp->dev);
6466 pci_disable_msi(bp->pdev);
6467 bp->flags &= ~USING_MSI_FLAG;
6468
6469 } else
6470 free_irq(bp->pdev->irq, bp->dev);
6471}
6472
6473static int bnx2x_enable_msix(struct bnx2x *bp)
6474{
6475 int i, rc, offset = 1;
6476 int igu_vec = 0;
a2fbb9ea 6477
6478 bp->msix_table[0].entry = igu_vec;
6479 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
a2fbb9ea 6480
34f80b04 6481 for_each_queue(bp, i) {
8badd27a 6482 igu_vec = BP_L_ID(bp) + offset + i;
6483 bp->msix_table[i + offset].entry = igu_vec;
6484 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6485 "(fastpath #%u)\n", i + offset, igu_vec, i);
6486 }
6487
34f80b04 6488 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
555f6c78 6489 BNX2X_NUM_QUEUES(bp) + offset);
34f80b04 6490 if (rc) {
6491 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6492 return rc;
34f80b04 6493 }
8badd27a 6494
6495 bp->flags |= USING_MSIX_FLAG;
6496
6497 return 0;
6498}
6499
6500static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6501{
34f80b04 6502 int i, rc, offset = 1;
a2fbb9ea 6503
6504 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6505 bp->dev->name, bp->dev);
6506 if (rc) {
6507 BNX2X_ERR("request sp irq failed\n");
6508 return -EBUSY;
6509 }
6510
6511 for_each_queue(bp, i) {
6512 struct bnx2x_fastpath *fp = &bp->fp[i];
6513
6514 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
34f80b04 6515 rc = request_irq(bp->msix_table[i + offset].vector,
555f6c78 6516 bnx2x_msix_fp_int, 0, fp->name, fp);
a2fbb9ea 6517 if (rc) {
555f6c78 6518 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
6519 bnx2x_free_msix_irqs(bp);
6520 return -EBUSY;
6521 }
6522
555f6c78 6523 fp->state = BNX2X_FP_STATE_IRQ;
6524 }
6525
6526 i = BNX2X_NUM_QUEUES(bp);
6527 if (is_multi(bp))
6528 printk(KERN_INFO PFX
6529 "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
6530 bp->dev->name, bp->msix_table[0].vector,
6531 bp->msix_table[offset].vector,
6532 bp->msix_table[offset + i - 1].vector);
6533 else
6534 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
6535 bp->dev->name, bp->msix_table[0].vector,
6536 bp->msix_table[offset + i - 1].vector);
6537
a2fbb9ea 6538 return 0;
6539}
6540
6541static int bnx2x_enable_msi(struct bnx2x *bp)
6542{
6543 int rc;
6544
6545 rc = pci_enable_msi(bp->pdev);
6546 if (rc) {
6547 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6548 return -1;
6549 }
6550 bp->flags |= USING_MSI_FLAG;
6551
6552 return 0;
6553}
6554
6555static int bnx2x_req_irq(struct bnx2x *bp)
6556{
8badd27a 6557 unsigned long flags;
34f80b04 6558 int rc;
a2fbb9ea 6559
6560 if (bp->flags & USING_MSI_FLAG)
6561 flags = 0;
6562 else
6563 flags = IRQF_SHARED;
6564
6565 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
34f80b04 6566 bp->dev->name, bp->dev);
6567 if (!rc)
6568 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6569
6570 return rc;
6571}
6572
6573static void bnx2x_napi_enable(struct bnx2x *bp)
6574{
6575 int i;
6576
555f6c78 6577 for_each_rx_queue(bp, i)
6578 napi_enable(&bnx2x_fp(bp, i, napi));
6579}
6580
6581static void bnx2x_napi_disable(struct bnx2x *bp)
6582{
6583 int i;
6584
555f6c78 6585 for_each_rx_queue(bp, i)
65abd74d
YG
6586 napi_disable(&bnx2x_fp(bp, i, napi));
6587}
6588
6589static void bnx2x_netif_start(struct bnx2x *bp)
6590{
6591 if (atomic_dec_and_test(&bp->intr_sem)) {
6592 if (netif_running(bp->dev)) {
65abd74d
YG
6593 bnx2x_napi_enable(bp);
6594 bnx2x_int_enable(bp);
6595 if (bp->state == BNX2X_STATE_OPEN)
6596 netif_tx_wake_all_queues(bp->dev);
6597 }
6598 }
6599}
6600
f8ef6e44 6601static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 6602{
f8ef6e44 6603 bnx2x_int_disable_sync(bp, disable_hw);
e94d8af3 6604 bnx2x_napi_disable(bp);
65abd74d 6605 if (netif_running(bp->dev)) {
6606 netif_tx_disable(bp->dev);
6607 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6608 }
6609}
6610
6611/*
6612 * Init service functions
6613 */
6614
3101c2bc 6615static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6616{
6617 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 6618 int port = BP_PORT(bp);
6619
6620 /* CAM allocation
6621 * unicasts 0-31:port0 32-63:port1
6622 * multicast 64-127:port0 128-191:port1
6623 */
8d9c5f34 6624 config->hdr.length = 2;
af246401 6625 config->hdr.offset = port ? 32 : 0;
0626b899 6626 config->hdr.client_id = bp->fp->cl_id;
6627 config->hdr.reserved1 = 0;
6628
6629 /* primary MAC */
6630 config->config_table[0].cam_entry.msb_mac_addr =
6631 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6632 config->config_table[0].cam_entry.middle_mac_addr =
6633 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6634 config->config_table[0].cam_entry.lsb_mac_addr =
6635 swab16(*(u16 *)&bp->dev->dev_addr[4]);
34f80b04 6636 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6637 if (set)
6638 config->config_table[0].target_table_entry.flags = 0;
6639 else
6640 CAM_INVALIDATE(config->config_table[0]);
6641 config->config_table[0].target_table_entry.client_id = 0;
6642 config->config_table[0].target_table_entry.vlan_id = 0;
6643
6644 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6645 (set ? "setting" : "clearing"),
6646 config->config_table[0].cam_entry.msb_mac_addr,
6647 config->config_table[0].cam_entry.middle_mac_addr,
6648 config->config_table[0].cam_entry.lsb_mac_addr);
6649
6650 /* broadcast */
6651 config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
6652 config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
6653 config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
34f80b04 6654 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6655 if (set)
6656 config->config_table[1].target_table_entry.flags =
a2fbb9ea 6657 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6658 else
6659 CAM_INVALIDATE(config->config_table[1]);
6660 config->config_table[1].target_table_entry.client_id = 0;
6661 config->config_table[1].target_table_entry.vlan_id = 0;
6662
6663 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6664 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6665 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6666}
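/* Editor's note on the swab16() packing above: the CAM wants the MAC
 * address as three 16-bit words in network byte order.  For
 * 00:11:22:33:44:55, *(u16 *)&dev_addr[0] reads 0x1100 on a
 * little-endian host and swab16() turns it into msb_mac_addr =
 * 0x0011; likewise middle_mac_addr = 0x2233 and lsb_mac_addr =
 * 0x4455.
 */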
6667
3101c2bc 6668static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6669{
6670 struct mac_configuration_cmd_e1h *config =
6671 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6672
3101c2bc 6673 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6674 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6675 return;
6676 }
6677
6678 /* CAM allocation for E1H
6679 * unicasts: by func number
6680 * multicast: 20+FUNC*20, 20 each
6681 */
8d9c5f34 6682 config->hdr.length = 1;
34f80b04 6683 config->hdr.offset = BP_FUNC(bp);
0626b899 6684 config->hdr.client_id = bp->fp->cl_id;
34f80b04
EG
6685 config->hdr.reserved1 = 0;
6686
6687 /* primary MAC */
6688 config->config_table[0].msb_mac_addr =
6689 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6690 config->config_table[0].middle_mac_addr =
6691 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6692 config->config_table[0].lsb_mac_addr =
6693 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6694 config->config_table[0].client_id = BP_L_ID(bp);
6695 config->config_table[0].vlan_id = 0;
6696 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
3101c2bc
YG
6697 if (set)
6698 config->config_table[0].flags = BP_PORT(bp);
6699 else
6700 config->config_table[0].flags =
6701 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 6702
3101c2bc
YG
6703 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6704 (set ? "setting" : "clearing"),
34f80b04
EG
6705 config->config_table[0].msb_mac_addr,
6706 config->config_table[0].middle_mac_addr,
6707 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6708
6709 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6710 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6711 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6712}
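
/* Worked example of the E1H CAM layout described above (hypothetical
 * function number): for FUNC = 2 the unicast entry sits at offset 2,
 * and that function's 20-entry multicast region starts at
 * 20 + 2*20 = 60.
 */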
6713
a2fbb9ea
ET
6714static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6715 int *state_p, int poll)
6716{
6717 /* can take a while if any port is running */
8b3a0f0b 6718 int cnt = 5000;
a2fbb9ea 6719
c14423fe
ET
6720 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6721 poll ? "polling" : "waiting", state, idx);
a2fbb9ea
ET
6722
6723 might_sleep();
34f80b04 6724 while (cnt--) {
a2fbb9ea
ET
6725 if (poll) {
6726 bnx2x_rx_int(bp->fp, 10);
34f80b04
EG
6727 /* if index is different from 0
6728 * the reply for some commands will
3101c2bc 6729 * be on the non-default queue
a2fbb9ea
ET
6730 */
6731 if (idx)
6732 bnx2x_rx_int(&bp->fp[idx], 10);
6733 }
a2fbb9ea 6734
3101c2bc 6735 mb(); /* state is changed by bnx2x_sp_event() */
8b3a0f0b
EG
6736 if (*state_p == state) {
6737#ifdef BNX2X_STOP_ON_ERROR
6738 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
6739#endif
a2fbb9ea 6740 return 0;
8b3a0f0b 6741 }
a2fbb9ea 6742
a2fbb9ea 6743 msleep(1);
a2fbb9ea
ET
6744 }
6745
a2fbb9ea 6746 /* timeout! */
49d66772
ET
6747 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6748 poll ? "polling" : "waiting", state, idx);
34f80b04
EG
6749#ifdef BNX2X_STOP_ON_ERROR
6750 bnx2x_panic();
6751#endif
a2fbb9ea 6752
49d66772 6753 return -EBUSY;
a2fbb9ea
ET
6754}
6755
6756static int bnx2x_setup_leading(struct bnx2x *bp)
6757{
34f80b04 6758 int rc;
a2fbb9ea 6759
c14423fe 6760 /* reset IGU state */
34f80b04 6761 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
6762
6763 /* SETUP ramrod */
6764 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6765
34f80b04
EG
6766 /* Wait for completion */
6767 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 6768
34f80b04 6769 return rc;
a2fbb9ea
ET
6770}
6771
6772static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6773{
555f6c78
EG
6774 struct bnx2x_fastpath *fp = &bp->fp[index];
6775
a2fbb9ea 6776 /* reset IGU state */
555f6c78 6777 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 6778
228241eb 6779 /* SETUP ramrod */
555f6c78
EG
6780 fp->state = BNX2X_FP_STATE_OPENING;
6781 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
6782 fp->cl_id, 0);
a2fbb9ea
ET
6783
6784 /* Wait for completion */
6785 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
555f6c78 6786 &(fp->state), 0);
a2fbb9ea
ET
6787}
6788
a2fbb9ea 6789static int bnx2x_poll(struct napi_struct *napi, int budget);
a2fbb9ea 6790
8badd27a 6791static void bnx2x_set_int_mode(struct bnx2x *bp)
a2fbb9ea 6792{
555f6c78 6793 int num_queues;
a2fbb9ea 6794
8badd27a
EG
6795 switch (int_mode) {
6796 case INT_MODE_INTx:
6797 case INT_MODE_MSI:
555f6c78
EG
6798 num_queues = 1;
6799 bp->num_rx_queues = num_queues;
6800 bp->num_tx_queues = num_queues;
6801 DP(NETIF_MSG_IFUP,
6802 "set number of queues to %d\n", num_queues);
8badd27a
EG
6803 break;
6804
6805 case INT_MODE_MSIX:
6806 default:
555f6c78
EG
6807 if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
6808 num_queues = min_t(u32, num_online_cpus(),
6809 BNX2X_MAX_QUEUES(bp));
34f80b04 6810 else
555f6c78
EG
6811 num_queues = 1;
6812 bp->num_rx_queues = num_queues;
6813 bp->num_tx_queues = num_queues;
6814 DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
6815 " number of tx queues to %d\n",
6816 bp->num_rx_queues, bp->num_tx_queues);
2dfe0e1f
EG
 6817 /* if we can't use MSI-X we only need one fp,
 6818 * so try to enable MSI-X with the requested number of fp's
 6819 * and fall back to MSI or legacy INT#x with one fp
6820 */
8badd27a 6821 if (bnx2x_enable_msix(bp)) {
34f80b04 6822 /* failed to enable MSI-X */
555f6c78
EG
6823 num_queues = 1;
6824 bp->num_rx_queues = num_queues;
6825 bp->num_tx_queues = num_queues;
6826 if (bp->multi_mode)
6827 BNX2X_ERR("Multi requested but failed to "
6828 "enable MSI-X set number of "
6829 "queues to %d\n", num_queues);
a2fbb9ea 6830 }
8badd27a 6831 break;
a2fbb9ea 6832 }
555f6c78 6833 bp->dev->real_num_tx_queues = bp->num_tx_queues;
8badd27a
EG
6834}
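
/* Worked example for the queue-count selection above (hypothetical
 * values): with multi_mode = ETH_RSS_MODE_REGULAR, 8 online CPUs and
 * BNX2X_MAX_QUEUES(bp) = 16, MSI-X mode picks min(8, 16) = 8 rx/tx
 * queue pairs; forcing int_mode to INT#x or MSI drops that to 1.
 */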
6835
6836static void bnx2x_set_rx_mode(struct net_device *dev);
6837
6838/* must be called with rtnl_lock */
6839static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6840{
6841 u32 load_code;
6842 int i, rc = 0;
6843#ifdef BNX2X_STOP_ON_ERROR
6844 DP(NETIF_MSG_IFUP, "enter load_mode %d\n", load_mode);
6845 if (unlikely(bp->panic))
6846 return -EPERM;
6847#endif
6848
6849 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6850
6851 bnx2x_set_int_mode(bp);
c14423fe 6852
a2fbb9ea
ET
6853 if (bnx2x_alloc_mem(bp))
6854 return -ENOMEM;
6855
555f6c78 6856 for_each_rx_queue(bp, i)
7a9b2557
VZ
6857 bnx2x_fp(bp, i, disable_tpa) =
6858 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6859
555f6c78 6860 for_each_rx_queue(bp, i)
2dfe0e1f
EG
6861 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6862 bnx2x_poll, 128);
6863
6864#ifdef BNX2X_STOP_ON_ERROR
555f6c78 6865 for_each_rx_queue(bp, i) {
2dfe0e1f
EG
6866 struct bnx2x_fastpath *fp = &bp->fp[i];
6867
6868 fp->poll_no_work = 0;
6869 fp->poll_calls = 0;
6870 fp->poll_max_calls = 0;
6871 fp->poll_complete = 0;
6872 fp->poll_exit = 0;
6873 }
6874#endif
6875 bnx2x_napi_enable(bp);
6876
34f80b04
EG
6877 if (bp->flags & USING_MSIX_FLAG) {
6878 rc = bnx2x_req_msix_irqs(bp);
6879 if (rc) {
6880 pci_disable_msix(bp->pdev);
2dfe0e1f 6881 goto load_error1;
34f80b04
EG
6882 }
6883 } else {
8badd27a
EG
6884 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
6885 bnx2x_enable_msi(bp);
34f80b04
EG
6886 bnx2x_ack_int(bp);
6887 rc = bnx2x_req_irq(bp);
6888 if (rc) {
2dfe0e1f 6889 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
8badd27a
EG
6890 if (bp->flags & USING_MSI_FLAG)
6891 pci_disable_msi(bp->pdev);
2dfe0e1f 6892 goto load_error1;
a2fbb9ea 6893 }
8badd27a
EG
6894 if (bp->flags & USING_MSI_FLAG) {
6895 bp->dev->irq = bp->pdev->irq;
6896 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
6897 bp->dev->name, bp->pdev->irq);
6898 }
a2fbb9ea
ET
6899 }
6900
2dfe0e1f
EG
 6901 /* Send LOAD_REQUEST command to the MCP.
 6902 The response indicates the type of LOAD to perform:
 6903 if this is the first port to be initialized,
 6904 the common blocks must be initialized as well; otherwise they are not
 6905 */
6906 if (!BP_NOMCP(bp)) {
6907 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6908 if (!load_code) {
6909 BNX2X_ERR("MCP response failure, aborting\n");
6910 rc = -EBUSY;
6911 goto load_error2;
6912 }
6913 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6914 rc = -EBUSY; /* other port in diagnostic mode */
6915 goto load_error2;
6916 }
6917
6918 } else {
6919 int port = BP_PORT(bp);
6920
f5372251 6921 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
2dfe0e1f
EG
6922 load_count[0], load_count[1], load_count[2]);
6923 load_count[0]++;
6924 load_count[1 + port]++;
f5372251 6925 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
2dfe0e1f
EG
6926 load_count[0], load_count[1], load_count[2]);
6927 if (load_count[0] == 1)
6928 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6929 else if (load_count[1 + port] == 1)
6930 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6931 else
6932 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6933 }
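
	/* Worked example of the no-MCP bookkeeping above (hypothetical
	 * sequence): the first load anywhere sees load_count[0] == 1 and
	 * initializes the common blocks; the first load on a given port
	 * sees load_count[1 + port] == 1 and gets LOAD_PORT; any later
	 * function on an already-loaded port gets LOAD_FUNCTION.
	 */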
6934
6935 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6936 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6937 bp->port.pmf = 1;
6938 else
6939 bp->port.pmf = 0;
6940 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
a2fbb9ea 6941
a2fbb9ea 6942 /* Initialize HW */
34f80b04
EG
6943 rc = bnx2x_init_hw(bp, load_code);
6944 if (rc) {
a2fbb9ea 6945 BNX2X_ERR("HW init failed, aborting\n");
2dfe0e1f 6946 goto load_error2;
a2fbb9ea
ET
6947 }
6948
a2fbb9ea 6949 /* Setup NIC internals and enable interrupts */
471de716 6950 bnx2x_nic_init(bp, load_code);
a2fbb9ea
ET
6951
6952 /* Send LOAD_DONE command to MCP */
34f80b04 6953 if (!BP_NOMCP(bp)) {
228241eb
ET
6954 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6955 if (!load_code) {
da5a662a 6956 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 6957 rc = -EBUSY;
2dfe0e1f 6958 goto load_error3;
a2fbb9ea
ET
6959 }
6960 }
6961
6962 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6963
34f80b04
EG
6964 rc = bnx2x_setup_leading(bp);
6965 if (rc) {
da5a662a 6966 BNX2X_ERR("Setup leading failed!\n");
2dfe0e1f 6967 goto load_error3;
34f80b04 6968 }
a2fbb9ea 6969
34f80b04
EG
6970 if (CHIP_IS_E1H(bp))
6971 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
f5372251 6972 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
34f80b04
EG
6973 bp->state = BNX2X_STATE_DISABLED;
6974 }
a2fbb9ea 6975
34f80b04
EG
6976 if (bp->state == BNX2X_STATE_OPEN)
6977 for_each_nondefault_queue(bp, i) {
6978 rc = bnx2x_setup_multi(bp, i);
6979 if (rc)
2dfe0e1f 6980 goto load_error3;
34f80b04 6981 }
a2fbb9ea 6982
34f80b04 6983 if (CHIP_IS_E1(bp))
3101c2bc 6984 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 6985 else
3101c2bc 6986 bnx2x_set_mac_addr_e1h(bp, 1);
34f80b04
EG
6987
6988 if (bp->port.pmf)
b5bf9068 6989 bnx2x_initial_phy_init(bp, load_mode);
a2fbb9ea
ET
6990
6991 /* Start fast path */
34f80b04
EG
6992 switch (load_mode) {
6993 case LOAD_NORMAL:
 6994 /* Tx queues need only be re-enabled */
555f6c78 6995 netif_tx_wake_all_queues(bp->dev);
2dfe0e1f 6996 /* Initialize the receive filter. */
34f80b04
EG
6997 bnx2x_set_rx_mode(bp->dev);
6998 break;
6999
7000 case LOAD_OPEN:
555f6c78 7001 netif_tx_start_all_queues(bp->dev);
2dfe0e1f 7002 /* Initialize the receive filter. */
34f80b04 7003 bnx2x_set_rx_mode(bp->dev);
34f80b04 7004 break;
a2fbb9ea 7005
34f80b04 7006 case LOAD_DIAG:
2dfe0e1f 7007 /* Initialize the receive filter. */
a2fbb9ea 7008 bnx2x_set_rx_mode(bp->dev);
34f80b04
EG
7009 bp->state = BNX2X_STATE_DIAG;
7010 break;
7011
7012 default:
7013 break;
a2fbb9ea
ET
7014 }
7015
34f80b04
EG
7016 if (!bp->port.pmf)
7017 bnx2x__link_status_update(bp);
7018
a2fbb9ea
ET
7019 /* start the timer */
7020 mod_timer(&bp->timer, jiffies + bp->current_interval);
7021
34f80b04 7022
a2fbb9ea
ET
7023 return 0;
7024
2dfe0e1f
EG
7025load_error3:
7026 bnx2x_int_disable_sync(bp, 1);
7027 if (!BP_NOMCP(bp)) {
7028 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7029 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7030 }
7031 bp->port.pmf = 0;
7a9b2557
VZ
7032 /* Free SKBs, SGEs, TPA pool and driver internals */
7033 bnx2x_free_skbs(bp);
555f6c78 7034 for_each_rx_queue(bp, i)
3196a88a 7035 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2dfe0e1f 7036load_error2:
d1014634
YG
7037 /* Release IRQs */
7038 bnx2x_free_irq(bp);
2dfe0e1f
EG
7039load_error1:
7040 bnx2x_napi_disable(bp);
555f6c78 7041 for_each_rx_queue(bp, i)
7cde1c8b 7042 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
7043 bnx2x_free_mem(bp);
7044
34f80b04 7045 return rc;
a2fbb9ea
ET
7046}
7047
7048static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7049{
555f6c78 7050 struct bnx2x_fastpath *fp = &bp->fp[index];
a2fbb9ea
ET
7051 int rc;
7052
c14423fe 7053 /* halt the connection */
555f6c78
EG
7054 fp->state = BNX2X_FP_STATE_HALTING;
7055 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
a2fbb9ea 7056
34f80b04 7057 /* Wait for completion */
a2fbb9ea 7058 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
555f6c78 7059 &(fp->state), 1);
c14423fe 7060 if (rc) /* timeout */
a2fbb9ea
ET
7061 return rc;
7062
7063 /* delete cfc entry */
7064 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7065
34f80b04
EG
7066 /* Wait for completion */
7067 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
555f6c78 7068 &(fp->state), 1);
34f80b04 7069 return rc;
a2fbb9ea
ET
7070}
7071
da5a662a 7072static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 7073{
4781bfad 7074 __le16 dsb_sp_prod_idx;
c14423fe 7075 /* if the other port is handling traffic,
a2fbb9ea 7076 this can take a lot of time */
34f80b04
EG
7077 int cnt = 500;
7078 int rc;
a2fbb9ea
ET
7079
7080 might_sleep();
7081
7082 /* Send HALT ramrod */
7083 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
0626b899 7084 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
a2fbb9ea 7085
34f80b04
EG
7086 /* Wait for completion */
7087 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7088 &(bp->fp[0].state), 1);
7089 if (rc) /* timeout */
da5a662a 7090 return rc;
a2fbb9ea 7091
49d66772 7092 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 7093
228241eb 7094 /* Send PORT_DELETE ramrod */
a2fbb9ea
ET
7095 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7096
49d66772 7097 /* Wait for the completion to arrive on the default status block;
a2fbb9ea
ET
 7098 we are going to reset the chip anyway,
 7099 so there is not much to do if this times out
 7100 */
34f80b04 7101 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
34f80b04
EG
7102 if (!cnt) {
7103 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7104 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7105 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7106#ifdef BNX2X_STOP_ON_ERROR
7107 bnx2x_panic();
7108#endif
36e552ab 7109 rc = -EBUSY;
34f80b04
EG
7110 break;
7111 }
7112 cnt--;
da5a662a 7113 msleep(1);
5650d9d4 7114 rmb(); /* Refresh the dsb_sp_prod */
49d66772
ET
7115 }
7116 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7117 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
da5a662a
VZ
7118
7119 return rc;
a2fbb9ea
ET
7120}
7121
34f80b04
EG
7122static void bnx2x_reset_func(struct bnx2x *bp)
7123{
7124 int port = BP_PORT(bp);
7125 int func = BP_FUNC(bp);
7126 int base, i;
7127
7128 /* Configure IGU */
7129 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7130 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7131
34f80b04
EG
7132 /* Clear ILT */
7133 base = FUNC_ILT_BASE(func);
7134 for (i = base; i < base + ILT_PER_FUNC; i++)
7135 bnx2x_ilt_wr(bp, i, 0);
7136}
7137
7138static void bnx2x_reset_port(struct bnx2x *bp)
7139{
7140 int port = BP_PORT(bp);
7141 u32 val;
7142
7143 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7144
7145 /* Do not rcv packets to BRB */
7146 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7147 /* Do not direct rcv packets that are not for MCP to the BRB */
7148 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7149 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7150
7151 /* Configure AEU */
7152 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7153
7154 msleep(100);
7155 /* Check for BRB port occupancy */
7156 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7157 if (val)
7158 DP(NETIF_MSG_IFDOWN,
33471629 7159 "BRB1 is not empty %d blocks are occupied\n", val);
34f80b04
EG
7160
7161 /* TODO: Close Doorbell port? */
7162}
7163
34f80b04
EG
7164static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7165{
7166 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7167 BP_FUNC(bp), reset_code);
7168
7169 switch (reset_code) {
7170 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7171 bnx2x_reset_port(bp);
7172 bnx2x_reset_func(bp);
7173 bnx2x_reset_common(bp);
7174 break;
7175
7176 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7177 bnx2x_reset_port(bp);
7178 bnx2x_reset_func(bp);
7179 break;
7180
7181 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7182 bnx2x_reset_func(bp);
7183 break;
49d66772 7184
34f80b04
EG
7185 default:
7186 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7187 break;
7188 }
7189}
7190
33471629 7191/* must be called with rtnl_lock */
34f80b04 7192static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea 7193{
da5a662a 7194 int port = BP_PORT(bp);
a2fbb9ea 7195 u32 reset_code = 0;
da5a662a 7196 int i, cnt, rc;
a2fbb9ea
ET
7197
7198 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7199
228241eb
ET
7200 bp->rx_mode = BNX2X_RX_MODE_NONE;
7201 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 7202
f8ef6e44 7203 bnx2x_netif_stop(bp, 1);
e94d8af3 7204
34f80b04
EG
7205 del_timer_sync(&bp->timer);
7206 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7207 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 7208 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 7209
70b9986c
EG
7210 /* Release IRQs */
7211 bnx2x_free_irq(bp);
7212
555f6c78
EG
7213 /* Wait until tx fastpath tasks complete */
7214 for_each_tx_queue(bp, i) {
228241eb
ET
7215 struct bnx2x_fastpath *fp = &bp->fp[i];
7216
34f80b04 7217 cnt = 1000;
e8b5fc51 7218 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 7219
65abd74d 7220 bnx2x_tx_int(fp, 1000);
34f80b04
EG
7221 if (!cnt) {
7222 BNX2X_ERR("timeout waiting for queue[%d]\n",
7223 i);
7224#ifdef BNX2X_STOP_ON_ERROR
7225 bnx2x_panic();
7226 return -EBUSY;
7227#else
7228 break;
7229#endif
7230 }
7231 cnt--;
da5a662a 7232 msleep(1);
34f80b04 7233 }
228241eb 7234 }
da5a662a
VZ
7235 /* Give HW time to discard old tx messages */
7236 msleep(1);
a2fbb9ea 7237
3101c2bc
YG
7238 if (CHIP_IS_E1(bp)) {
7239 struct mac_configuration_cmd *config =
7240 bnx2x_sp(bp, mcast_config);
7241
7242 bnx2x_set_mac_addr_e1(bp, 0);
7243
8d9c5f34 7244 for (i = 0; i < config->hdr.length; i++)
3101c2bc
YG
7245 CAM_INVALIDATE(config->config_table[i]);
7246
8d9c5f34 7247 config->hdr.length = i;
3101c2bc
YG
7248 if (CHIP_REV_IS_SLOW(bp))
7249 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7250 else
7251 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
0626b899 7252 config->hdr.client_id = bp->fp->cl_id;
3101c2bc
YG
7253 config->hdr.reserved1 = 0;
7254
7255 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7256 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7257 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7258
7259 } else { /* E1H */
65abd74d
YG
7260 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7261
3101c2bc
YG
7262 bnx2x_set_mac_addr_e1h(bp, 0);
7263
7264 for (i = 0; i < MC_HASH_SIZE; i++)
7265 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7266 }
7267
65abd74d
YG
7268 if (unload_mode == UNLOAD_NORMAL)
7269 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7270
7271 else if (bp->flags & NO_WOL_FLAG) {
7272 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7273 if (CHIP_IS_E1H(bp))
7274 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7275
7276 } else if (bp->wol) {
7277 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7278 u8 *mac_addr = bp->dev->dev_addr;
7279 u32 val;
 7280 /* The MAC address is written to entries 1-4 to
 7281 preserve entry 0, which is used by the PMF */
7282 u8 entry = (BP_E1HVN(bp) + 1)*8;
7283
7284 val = (mac_addr[0] << 8) | mac_addr[1];
7285 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7286
7287 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7288 (mac_addr[4] << 8) | mac_addr[5];
7289 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7290
7291 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7292
7293 } else
7294 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7295
34f80b04
EG
 7296 /* Close the multi and leading connections;
 7297 ramrod completions are collected synchronously */
a2fbb9ea
ET
7298 for_each_nondefault_queue(bp, i)
7299 if (bnx2x_stop_multi(bp, i))
228241eb 7300 goto unload_error;
a2fbb9ea 7301
da5a662a
VZ
7302 rc = bnx2x_stop_leading(bp);
7303 if (rc) {
34f80b04 7304 BNX2X_ERR("Stop leading failed!\n");
da5a662a 7305#ifdef BNX2X_STOP_ON_ERROR
34f80b04 7306 return -EBUSY;
da5a662a
VZ
7307#else
7308 goto unload_error;
34f80b04 7309#endif
228241eb
ET
7310 }
7311
7312unload_error:
34f80b04 7313 if (!BP_NOMCP(bp))
228241eb 7314 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04 7315 else {
f5372251 7316 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
34f80b04
EG
7317 load_count[0], load_count[1], load_count[2]);
7318 load_count[0]--;
da5a662a 7319 load_count[1 + port]--;
f5372251 7320 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
34f80b04
EG
7321 load_count[0], load_count[1], load_count[2]);
7322 if (load_count[0] == 0)
7323 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 7324 else if (load_count[1 + port] == 0)
34f80b04
EG
7325 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7326 else
7327 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7328 }
a2fbb9ea 7329
34f80b04
EG
7330 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7331 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7332 bnx2x__link_reset(bp);
a2fbb9ea
ET
7333
7334 /* Reset the chip */
228241eb 7335 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
7336
7337 /* Report UNLOAD_DONE to MCP */
34f80b04 7338 if (!BP_NOMCP(bp))
a2fbb9ea 7339 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
356e2385 7340
9a035440 7341 bp->port.pmf = 0;
a2fbb9ea 7342
7a9b2557 7343 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 7344 bnx2x_free_skbs(bp);
555f6c78 7345 for_each_rx_queue(bp, i)
3196a88a 7346 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 7347 for_each_rx_queue(bp, i)
7cde1c8b 7348 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
7349 bnx2x_free_mem(bp);
7350
7351 bp->state = BNX2X_STATE_CLOSED;
228241eb 7352
a2fbb9ea
ET
7353 netif_carrier_off(bp->dev);
7354
7355 return 0;
7356}
7357
34f80b04
EG
7358static void bnx2x_reset_task(struct work_struct *work)
7359{
7360 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7361
7362#ifdef BNX2X_STOP_ON_ERROR
7363 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7364 " so reset not done to allow debug dump,\n"
7365 KERN_ERR " you will need to reboot when done\n");
7366 return;
7367#endif
7368
7369 rtnl_lock();
7370
7371 if (!netif_running(bp->dev))
7372 goto reset_task_exit;
7373
7374 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7375 bnx2x_nic_load(bp, LOAD_NORMAL);
7376
7377reset_task_exit:
7378 rtnl_unlock();
7379}
7380
a2fbb9ea
ET
7381/* end of nic load/unload */
7382
7383/* ethtool_ops */
7384
7385/*
7386 * Init service functions
7387 */
7388
f1ef27ef
EG
7389static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7390{
7391 switch (func) {
7392 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7393 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7394 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7395 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7396 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7397 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7398 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7399 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7400 default:
7401 BNX2X_ERR("Unsupported function index: %d\n", func);
7402 return (u32)(-1);
7403 }
7404}
7405
7406static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7407{
7408 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7409
7410 /* Flush all outstanding writes */
7411 mmiowb();
7412
7413 /* Pretend to be function 0 */
7414 REG_WR(bp, reg, 0);
7415 /* Flush the GRC transaction (in the chip) */
7416 new_val = REG_RD(bp, reg);
7417 if (new_val != 0) {
7418 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7419 new_val);
7420 BUG();
7421 }
7422
7423 /* From now we are in the "like-E1" mode */
7424 bnx2x_int_disable(bp);
7425
7426 /* Flush all outstanding writes */
7427 mmiowb();
7428
 7429 /* Restore the original function settings */
7430 REG_WR(bp, reg, orig_func);
7431 new_val = REG_RD(bp, reg);
7432 if (new_val != orig_func) {
7433 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7434 orig_func, new_val);
7435 BUG();
7436 }
7437}
7438
7439static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7440{
7441 if (CHIP_IS_E1H(bp))
7442 bnx2x_undi_int_disable_e1h(bp, func);
7443 else
7444 bnx2x_int_disable(bp);
7445}
7446
34f80b04
EG
7447static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7448{
7449 u32 val;
7450
7451 /* Check if there is any driver already loaded */
7452 val = REG_RD(bp, MISC_REG_UNPREPARED);
7453 if (val == 0x1) {
7454 /* Check if it is the UNDI driver
 7455 * the UNDI driver initializes the CID offset for the normal doorbell to 0x7
7456 */
4a37fb66 7457 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
7458 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7459 if (val == 0x7) {
7460 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7461 /* save our func */
34f80b04 7462 int func = BP_FUNC(bp);
da5a662a
VZ
7463 u32 swap_en;
7464 u32 swap_val;
34f80b04 7465
b4661739
EG
7466 /* clear the UNDI indication */
7467 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7468
34f80b04
EG
7469 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7470
7471 /* try unload UNDI on port 0 */
7472 bp->func = 0;
da5a662a
VZ
7473 bp->fw_seq =
7474 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7475 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 7476 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
7477
7478 /* if UNDI is loaded on the other port */
7479 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7480
da5a662a
VZ
7481 /* send "DONE" for previous unload */
7482 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7483
7484 /* unload UNDI on port 1 */
34f80b04 7485 bp->func = 1;
da5a662a
VZ
7486 bp->fw_seq =
7487 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7488 DRV_MSG_SEQ_NUMBER_MASK);
7489 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7490
7491 bnx2x_fw_command(bp, reset_code);
34f80b04
EG
7492 }
7493
b4661739
EG
7494 /* now it's safe to release the lock */
7495 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7496
f1ef27ef 7497 bnx2x_undi_int_disable(bp, func);
da5a662a
VZ
7498
 7499 /* close input traffic and wait for it to drain */
7500 /* Do not rcv packets to BRB */
7501 REG_WR(bp,
7502 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7503 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7504 /* Do not direct rcv packets that are not for MCP to
7505 * the BRB */
7506 REG_WR(bp,
7507 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7508 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7509 /* clear AEU */
7510 REG_WR(bp,
7511 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7512 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7513 msleep(10);
7514
7515 /* save NIG port swap info */
7516 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7517 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
7518 /* reset device */
7519 REG_WR(bp,
7520 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 7521 0xd3ffffff);
34f80b04
EG
7522 REG_WR(bp,
7523 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7524 0x1403);
da5a662a
VZ
7525 /* take the NIG out of reset and restore swap values */
7526 REG_WR(bp,
7527 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7528 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7529 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7530 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7531
7532 /* send unload done to the MCP */
7533 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7534
7535 /* restore our func and fw_seq */
7536 bp->func = func;
7537 bp->fw_seq =
7538 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7539 DRV_MSG_SEQ_NUMBER_MASK);
b4661739
EG
7540
7541 } else
7542 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
7543 }
7544}
7545
7546static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7547{
7548 u32 val, val2, val3, val4, id;
72ce58c3 7549 u16 pmc;
34f80b04
EG
7550
7551 /* Get the chip revision id and number. */
7552 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7553 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7554 id = ((val & 0xffff) << 16);
7555 val = REG_RD(bp, MISC_REG_CHIP_REV);
7556 id |= ((val & 0xf) << 12);
7557 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7558 id |= ((val & 0xff) << 4);
5a40e08e 7559 val = REG_RD(bp, MISC_REG_BOND_ID);
34f80b04
EG
7560 id |= (val & 0xf);
7561 bp->common.chip_id = id;
7562 bp->link_params.chip_id = bp->common.chip_id;
7563 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7564
1c06328c
EG
7565 val = (REG_RD(bp, 0x2874) & 0x55);
7566 if ((bp->common.chip_id & 0x1) ||
7567 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7568 bp->flags |= ONE_PORT_FLAG;
7569 BNX2X_DEV_INFO("single port device\n");
7570 }
7571
34f80b04
EG
7572 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7573 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7574 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7575 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7576 bp->common.flash_size, bp->common.flash_size);
7577
7578 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7579 bp->link_params.shmem_base = bp->common.shmem_base;
7580 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7581
7582 if (!bp->common.shmem_base ||
7583 (bp->common.shmem_base < 0xA0000) ||
7584 (bp->common.shmem_base >= 0xC0000)) {
7585 BNX2X_DEV_INFO("MCP not active\n");
7586 bp->flags |= NO_MCP_FLAG;
7587 return;
7588 }
7589
7590 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7591 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7592 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7593 BNX2X_ERR("BAD MCP validity signature\n");
7594
7595 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 7596 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
34f80b04
EG
7597
7598 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7599 SHARED_HW_CFG_LED_MODE_MASK) >>
7600 SHARED_HW_CFG_LED_MODE_SHIFT);
7601
c2c8b03e
EG
7602 bp->link_params.feature_config_flags = 0;
7603 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7604 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7605 bp->link_params.feature_config_flags |=
7606 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7607 else
7608 bp->link_params.feature_config_flags &=
7609 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7610
34f80b04
EG
7611 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7612 bp->common.bc_ver = val;
7613 BNX2X_DEV_INFO("bc_ver %X\n", val);
7614 if (val < BNX2X_BC_VER) {
 7615 /* for now, only warn;
 7616 * later we might need to enforce this */
7617 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7618 " please upgrade BC\n", BNX2X_BC_VER, val);
7619 }
72ce58c3
EG
7620
7621 if (BP_E1HVN(bp) == 0) {
7622 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7623 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7624 } else {
7625 /* no WOL capability for E1HVN != 0 */
7626 bp->flags |= NO_WOL_FLAG;
7627 }
7628 BNX2X_DEV_INFO("%sWoL capable\n",
f5372251 7629 (bp->flags & NO_WOL_FLAG) ? "not " : "");
34f80b04
EG
7630
7631 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7632 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7633 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7634 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7635
7636 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7637 val, val2, val3, val4);
7638}
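
/* Worked example of the chip-id packing above (hypothetical register
 * values): chip num 0x164e, rev 0x1, metal 0x00 and bond 0x0 assemble
 * to id = (0x164e << 16) | (0x1 << 12) | (0x00 << 4) | 0x0
 * = 0x164e1000.
 */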
7639
7640static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7641 u32 switch_cfg)
a2fbb9ea 7642{
34f80b04 7643 int port = BP_PORT(bp);
a2fbb9ea
ET
7644 u32 ext_phy_type;
7645
a2fbb9ea
ET
7646 switch (switch_cfg) {
7647 case SWITCH_CFG_1G:
7648 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7649
c18487ee
YR
7650 ext_phy_type =
7651 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
7652 switch (ext_phy_type) {
7653 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7654 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7655 ext_phy_type);
7656
34f80b04
EG
7657 bp->port.supported |= (SUPPORTED_10baseT_Half |
7658 SUPPORTED_10baseT_Full |
7659 SUPPORTED_100baseT_Half |
7660 SUPPORTED_100baseT_Full |
7661 SUPPORTED_1000baseT_Full |
7662 SUPPORTED_2500baseX_Full |
7663 SUPPORTED_TP |
7664 SUPPORTED_FIBRE |
7665 SUPPORTED_Autoneg |
7666 SUPPORTED_Pause |
7667 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7668 break;
7669
7670 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7671 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7672 ext_phy_type);
7673
34f80b04
EG
7674 bp->port.supported |= (SUPPORTED_10baseT_Half |
7675 SUPPORTED_10baseT_Full |
7676 SUPPORTED_100baseT_Half |
7677 SUPPORTED_100baseT_Full |
7678 SUPPORTED_1000baseT_Full |
7679 SUPPORTED_TP |
7680 SUPPORTED_FIBRE |
7681 SUPPORTED_Autoneg |
7682 SUPPORTED_Pause |
7683 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7684 break;
7685
7686 default:
7687 BNX2X_ERR("NVRAM config error. "
7688 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 7689 bp->link_params.ext_phy_config);
a2fbb9ea
ET
7690 return;
7691 }
7692
34f80b04
EG
7693 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7694 port*0x10);
7695 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
7696 break;
7697
7698 case SWITCH_CFG_10G:
7699 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7700
c18487ee
YR
7701 ext_phy_type =
7702 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
7703 switch (ext_phy_type) {
7704 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7705 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7706 ext_phy_type);
7707
34f80b04
EG
7708 bp->port.supported |= (SUPPORTED_10baseT_Half |
7709 SUPPORTED_10baseT_Full |
7710 SUPPORTED_100baseT_Half |
7711 SUPPORTED_100baseT_Full |
7712 SUPPORTED_1000baseT_Full |
7713 SUPPORTED_2500baseX_Full |
7714 SUPPORTED_10000baseT_Full |
7715 SUPPORTED_TP |
7716 SUPPORTED_FIBRE |
7717 SUPPORTED_Autoneg |
7718 SUPPORTED_Pause |
7719 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7720 break;
7721
589abe3a
EG
7722 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7723 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
34f80b04 7724 ext_phy_type);
f1410647 7725
34f80b04 7726 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 7727 SUPPORTED_1000baseT_Full |
34f80b04 7728 SUPPORTED_FIBRE |
589abe3a 7729 SUPPORTED_Autoneg |
34f80b04
EG
7730 SUPPORTED_Pause |
7731 SUPPORTED_Asym_Pause);
f1410647
ET
7732 break;
7733
589abe3a
EG
7734 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7735 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
f1410647
ET
7736 ext_phy_type);
7737
34f80b04 7738 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 7739 SUPPORTED_2500baseX_Full |
34f80b04 7740 SUPPORTED_1000baseT_Full |
589abe3a
EG
7741 SUPPORTED_FIBRE |
7742 SUPPORTED_Autoneg |
7743 SUPPORTED_Pause |
7744 SUPPORTED_Asym_Pause);
7745 break;
7746
7747 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7748 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7749 ext_phy_type);
7750
7751 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04
EG
7752 SUPPORTED_FIBRE |
7753 SUPPORTED_Pause |
7754 SUPPORTED_Asym_Pause);
f1410647
ET
7755 break;
7756
589abe3a
EG
7757 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7758 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
a2fbb9ea
ET
7759 ext_phy_type);
7760
34f80b04
EG
7761 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7762 SUPPORTED_1000baseT_Full |
7763 SUPPORTED_FIBRE |
34f80b04
EG
7764 SUPPORTED_Pause |
7765 SUPPORTED_Asym_Pause);
f1410647
ET
7766 break;
7767
589abe3a
EG
7768 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
7769 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
c18487ee
YR
7770 ext_phy_type);
7771
34f80b04 7772 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04 7773 SUPPORTED_1000baseT_Full |
34f80b04 7774 SUPPORTED_Autoneg |
589abe3a 7775 SUPPORTED_FIBRE |
34f80b04
EG
7776 SUPPORTED_Pause |
7777 SUPPORTED_Asym_Pause);
c18487ee
YR
7778 break;
7779
f1410647
ET
7780 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7781 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7782 ext_phy_type);
7783
34f80b04
EG
7784 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7785 SUPPORTED_TP |
7786 SUPPORTED_Autoneg |
7787 SUPPORTED_Pause |
7788 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7789 break;
7790
28577185
EG
7791 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
7792 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
7793 ext_phy_type);
7794
7795 bp->port.supported |= (SUPPORTED_10baseT_Half |
7796 SUPPORTED_10baseT_Full |
7797 SUPPORTED_100baseT_Half |
7798 SUPPORTED_100baseT_Full |
7799 SUPPORTED_1000baseT_Full |
7800 SUPPORTED_10000baseT_Full |
7801 SUPPORTED_TP |
7802 SUPPORTED_Autoneg |
7803 SUPPORTED_Pause |
7804 SUPPORTED_Asym_Pause);
7805 break;
7806
c18487ee
YR
7807 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7808 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7809 bp->link_params.ext_phy_config);
7810 break;
7811
a2fbb9ea
ET
7812 default:
7813 BNX2X_ERR("NVRAM config error. "
7814 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 7815 bp->link_params.ext_phy_config);
a2fbb9ea
ET
7816 return;
7817 }
7818
34f80b04
EG
7819 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7820 port*0x18);
7821 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 7822
a2fbb9ea
ET
7823 break;
7824
7825 default:
7826 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 7827 bp->port.link_config);
a2fbb9ea
ET
7828 return;
7829 }
34f80b04 7830 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea
ET
7831
7832 /* mask what we support according to speed_cap_mask */
c18487ee
YR
7833 if (!(bp->link_params.speed_cap_mask &
7834 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 7835 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 7836
c18487ee
YR
7837 if (!(bp->link_params.speed_cap_mask &
7838 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 7839 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 7840
c18487ee
YR
7841 if (!(bp->link_params.speed_cap_mask &
7842 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 7843 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 7844
c18487ee
YR
7845 if (!(bp->link_params.speed_cap_mask &
7846 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 7847 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 7848
c18487ee
YR
7849 if (!(bp->link_params.speed_cap_mask &
7850 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
7851 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7852 SUPPORTED_1000baseT_Full);
a2fbb9ea 7853
c18487ee
YR
7854 if (!(bp->link_params.speed_cap_mask &
7855 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 7856 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 7857
c18487ee
YR
7858 if (!(bp->link_params.speed_cap_mask &
7859 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 7860 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 7861
34f80b04 7862 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
7863}
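
/* Worked example of the speed_cap_mask filtering above (hypothetical
 * mask): if the NVRAM capability mask omits
 * PORT_HW_CFG_SPEED_CAPABILITY_D0_10G, then SUPPORTED_10000baseT_Full
 * is cleared even when the PHY itself supports it, so ethtool never
 * offers 10G on that port.
 */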
7864
34f80b04 7865static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 7866{
c18487ee 7867 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 7868
34f80b04 7869 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 7870 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 7871 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 7872 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7873 bp->port.advertising = bp->port.supported;
a2fbb9ea 7874 } else {
c18487ee
YR
7875 u32 ext_phy_type =
7876 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7877
7878 if ((ext_phy_type ==
7879 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7880 (ext_phy_type ==
7881 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 7882 /* force 10G, no AN */
c18487ee 7883 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 7884 bp->port.advertising =
a2fbb9ea
ET
7885 (ADVERTISED_10000baseT_Full |
7886 ADVERTISED_FIBRE);
7887 break;
7888 }
7889 BNX2X_ERR("NVRAM config error. "
7890 "Invalid link_config 0x%x"
7891 " Autoneg not supported\n",
34f80b04 7892 bp->port.link_config);
a2fbb9ea
ET
7893 return;
7894 }
7895 break;
7896
7897 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 7898 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 7899 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
7900 bp->port.advertising = (ADVERTISED_10baseT_Full |
7901 ADVERTISED_TP);
a2fbb9ea
ET
7902 } else {
7903 BNX2X_ERR("NVRAM config error. "
7904 "Invalid link_config 0x%x"
7905 " speed_cap_mask 0x%x\n",
34f80b04 7906 bp->port.link_config,
c18487ee 7907 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7908 return;
7909 }
7910 break;
7911
7912 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 7913 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
7914 bp->link_params.req_line_speed = SPEED_10;
7915 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7916 bp->port.advertising = (ADVERTISED_10baseT_Half |
7917 ADVERTISED_TP);
a2fbb9ea
ET
7918 } else {
7919 BNX2X_ERR("NVRAM config error. "
7920 "Invalid link_config 0x%x"
7921 " speed_cap_mask 0x%x\n",
34f80b04 7922 bp->port.link_config,
c18487ee 7923 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7924 return;
7925 }
7926 break;
7927
7928 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 7929 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 7930 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
7931 bp->port.advertising = (ADVERTISED_100baseT_Full |
7932 ADVERTISED_TP);
a2fbb9ea
ET
7933 } else {
7934 BNX2X_ERR("NVRAM config error. "
7935 "Invalid link_config 0x%x"
7936 " speed_cap_mask 0x%x\n",
34f80b04 7937 bp->port.link_config,
c18487ee 7938 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7939 return;
7940 }
7941 break;
7942
7943 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 7944 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
7945 bp->link_params.req_line_speed = SPEED_100;
7946 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7947 bp->port.advertising = (ADVERTISED_100baseT_Half |
7948 ADVERTISED_TP);
a2fbb9ea
ET
7949 } else {
7950 BNX2X_ERR("NVRAM config error. "
7951 "Invalid link_config 0x%x"
7952 " speed_cap_mask 0x%x\n",
34f80b04 7953 bp->port.link_config,
c18487ee 7954 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7955 return;
7956 }
7957 break;
7958
7959 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 7960 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 7961 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
7962 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7963 ADVERTISED_TP);
a2fbb9ea
ET
7964 } else {
7965 BNX2X_ERR("NVRAM config error. "
7966 "Invalid link_config 0x%x"
7967 " speed_cap_mask 0x%x\n",
34f80b04 7968 bp->port.link_config,
c18487ee 7969 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7970 return;
7971 }
7972 break;
7973
7974 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 7975 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 7976 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
7977 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7978 ADVERTISED_TP);
a2fbb9ea
ET
7979 } else {
7980 BNX2X_ERR("NVRAM config error. "
7981 "Invalid link_config 0x%x"
7982 " speed_cap_mask 0x%x\n",
34f80b04 7983 bp->port.link_config,
c18487ee 7984 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7985 return;
7986 }
7987 break;
7988
7989 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7990 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7991 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 7992 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 7993 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
7994 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7995 ADVERTISED_FIBRE);
a2fbb9ea
ET
7996 } else {
7997 BNX2X_ERR("NVRAM config error. "
7998 "Invalid link_config 0x%x"
7999 " speed_cap_mask 0x%x\n",
34f80b04 8000 bp->port.link_config,
c18487ee 8001 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8002 return;
8003 }
8004 break;
8005
8006 default:
8007 BNX2X_ERR("NVRAM config error. "
8008 "BAD link speed link_config 0x%x\n",
34f80b04 8009 bp->port.link_config);
c18487ee 8010 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 8011 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
8012 break;
8013 }
a2fbb9ea 8014
34f80b04
EG
8015 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8016 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 8017 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 8018 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 8019 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 8020
c18487ee 8021 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 8022 " advertising 0x%x\n",
c18487ee
YR
8023 bp->link_params.req_line_speed,
8024 bp->link_params.req_duplex,
34f80b04 8025 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
8026}
8027
34f80b04 8028static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 8029{
34f80b04
EG
8030 int port = BP_PORT(bp);
8031 u32 val, val2;
589abe3a 8032 u32 config;
c2c8b03e 8033 u16 i;
a2fbb9ea 8034
c18487ee 8035 bp->link_params.bp = bp;
34f80b04 8036 bp->link_params.port = port;
c18487ee 8037
c18487ee 8038 bp->link_params.lane_config =
a2fbb9ea 8039 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 8040 bp->link_params.ext_phy_config =
a2fbb9ea
ET
8041 SHMEM_RD(bp,
8042 dev_info.port_hw_config[port].external_phy_config);
c18487ee 8043 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
8044 SHMEM_RD(bp,
8045 dev_info.port_hw_config[port].speed_capability_mask);
8046
34f80b04 8047 bp->port.link_config =
a2fbb9ea
ET
8048 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8049
c2c8b03e
EG
8050 /* Get the 4 lanes xgxs config rx and tx */
8051 for (i = 0; i < 2; i++) {
8052 val = SHMEM_RD(bp,
8053 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8054 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8055 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8056
8057 val = SHMEM_RD(bp,
8058 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8059 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8060 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8061 }
8062
589abe3a
EG
8063 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8064 if (config & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED)
8065 bp->link_params.feature_config_flags |=
8066 FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
8067 else
8068 bp->link_params.feature_config_flags &=
8069 ~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
8070
3ce2c3f9
EG
8071 /* If the device is capable of WoL, set the default state according
8072 * to the HW
8073 */
8074 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8075 (config & PORT_FEATURE_WOL_ENABLED));
8076
c2c8b03e
EG
8077 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8078 " speed_cap_mask 0x%08x link_config 0x%08x\n",
c18487ee
YR
8079 bp->link_params.lane_config,
8080 bp->link_params.ext_phy_config,
34f80b04 8081 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 8082
34f80b04 8083 bp->link_params.switch_cfg = (bp->port.link_config &
c18487ee
YR
8084 PORT_FEATURE_CONNECTED_SWITCH_MASK);
8085 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
8086
8087 bnx2x_link_settings_requested(bp);
8088
8089 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8090 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8091 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8092 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8093 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8094 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8095 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8096 bp->dev->dev_addr[5] = (u8)(val & 0xff);
c18487ee
YR
8097 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8098 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
34f80b04
EG
8099}
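
/* Worked example of the MAC extraction above (hypothetical shmem
 * words): mac_upper = 0x0010 and mac_lower = 0x18aabbcc yield
 * dev_addr = 00:10:18:aa:bb:cc -- the upper word carries bytes 0-1
 * and the lower word bytes 2-5, most significant byte first.
 */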
8100
8101static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8102{
8103 int func = BP_FUNC(bp);
8104 u32 val, val2;
8105 int rc = 0;
a2fbb9ea 8106
34f80b04 8107 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 8108
34f80b04
EG
8109 bp->e1hov = 0;
8110 bp->e1hmf = 0;
8111 if (CHIP_IS_E1H(bp)) {
8112 bp->mf_config =
8113 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 8114
3196a88a
EG
8115 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
8116 FUNC_MF_CFG_E1HOV_TAG_MASK);
34f80b04 8117 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
a2fbb9ea 8118
34f80b04
EG
8119 bp->e1hov = val;
8120 bp->e1hmf = 1;
8121 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
8122 "(0x%04x)\n",
8123 func, bp->e1hov, bp->e1hov);
8124 } else {
f5372251 8125 BNX2X_DEV_INFO("single function mode\n");
34f80b04
EG
8126 if (BP_E1HVN(bp)) {
8127 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8128 " aborting\n", func);
8129 rc = -EPERM;
8130 }
8131 }
8132 }
a2fbb9ea 8133
34f80b04
EG
8134 if (!BP_NOMCP(bp)) {
8135 bnx2x_get_port_hwinfo(bp);
8136
8137 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8138 DRV_MSG_SEQ_NUMBER_MASK);
8139 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8140 }
8141
8142 if (IS_E1HMF(bp)) {
8143 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8144 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8145 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8146 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8147 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8148 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8149 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8150 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8151 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8152 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8153 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8154 ETH_ALEN);
8155 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8156 ETH_ALEN);
a2fbb9ea 8157 }
34f80b04
EG
8158
8159 return rc;
a2fbb9ea
ET
8160 }
8161
34f80b04
EG
8162 if (BP_NOMCP(bp)) {
8163 /* only supposed to happen on emulation/FPGA */
33471629 8164 BNX2X_ERR("warning random MAC workaround active\n");
34f80b04
EG
8165 random_ether_addr(bp->dev->dev_addr);
8166 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8167 }
a2fbb9ea 8168
34f80b04
EG
8169 return rc;
8170}
8171
8172static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8173{
8174 int func = BP_FUNC(bp);
87942b46 8175 int timer_interval;
34f80b04
EG
8176 int rc;
8177
da5a662a
VZ
8178 /* Disable interrupt handling until HW is initialized */
8179 atomic_set(&bp->intr_sem, 1);
8180
34f80b04 8181 mutex_init(&bp->port.phy_mutex);
a2fbb9ea 8182
1cf167f2 8183 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
34f80b04
EG
8184 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8185
8186 rc = bnx2x_get_hwinfo(bp);
8187
8188 /* need to reset chip if undi was active */
8189 if (!BP_NOMCP(bp))
8190 bnx2x_undi_unload(bp);
8191
8192 if (CHIP_REV_IS_FPGA(bp))
8193 printk(KERN_ERR PFX "FPGA detected\n");
8194
8195 if (BP_NOMCP(bp) && (func == 0))
8196 printk(KERN_ERR PFX
8197 "MCP disabled, must load devices in order!\n");
8198
555f6c78 8199 /* Set multi queue mode */
8badd27a
EG
8200 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8201 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
555f6c78 8202 printk(KERN_ERR PFX
8badd27a 8203 "Multi disabled since int_mode requested is not MSI-X\n");
555f6c78
EG
8204 multi_mode = ETH_RSS_MODE_DISABLED;
8205 }
8206 bp->multi_mode = multi_mode;
8207
8208
7a9b2557
VZ
8209 /* Set TPA flags */
8210 if (disable_tpa) {
8211 bp->flags &= ~TPA_ENABLE_FLAG;
8212 bp->dev->features &= ~NETIF_F_LRO;
8213 } else {
8214 bp->flags |= TPA_ENABLE_FLAG;
8215 bp->dev->features |= NETIF_F_LRO;
8216 }
8217
8d5726c4 8218 bp->mrrs = mrrs;
7a9b2557 8219
34f80b04
EG
8220 bp->tx_ring_size = MAX_TX_AVAIL;
8221 bp->rx_ring_size = MAX_RX_AVAIL;
8222
8223 bp->rx_csum = 1;
34f80b04
EG
8224
8225 bp->tx_ticks = 50;
8226 bp->rx_ticks = 25;
8227
87942b46
EG
8228 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8229 bp->current_interval = (poll ? poll : timer_interval);
34f80b04
EG
8230
8231 init_timer(&bp->timer);
8232 bp->timer.expires = jiffies + bp->current_interval;
8233 bp->timer.data = (unsigned long) bp;
8234 bp->timer.function = bnx2x_timer;
8235
8236 return rc;
a2fbb9ea
ET
8237}
8238
8239/*
8240 * ethtool service functions
8241 */
8242
8243/* All ethtool functions called with rtnl_lock */
8244
8245static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8246{
8247 struct bnx2x *bp = netdev_priv(dev);
8248
34f80b04
EG
8249 cmd->supported = bp->port.supported;
8250 cmd->advertising = bp->port.advertising;
a2fbb9ea
ET
8251
8252 if (netif_carrier_ok(dev)) {
c18487ee
YR
8253 cmd->speed = bp->link_vars.line_speed;
8254 cmd->duplex = bp->link_vars.duplex;
a2fbb9ea 8255 } else {
c18487ee
YR
8256 cmd->speed = bp->link_params.req_line_speed;
8257 cmd->duplex = bp->link_params.req_duplex;
a2fbb9ea 8258 }
34f80b04
EG
8259 if (IS_E1HMF(bp)) {
8260 u16 vn_max_rate;
8261
8262 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8263 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8264 if (vn_max_rate < cmd->speed)
8265 cmd->speed = vn_max_rate;
8266 }
a2fbb9ea 8267
c18487ee
YR
8268 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8269 u32 ext_phy_type =
8270 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
8271
8272 switch (ext_phy_type) {
8273 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
f1410647 8274 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 8275 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
589abe3a
EG
8276 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8277 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8278 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
f1410647
ET
8279 cmd->port = PORT_FIBRE;
8280 break;
8281
8282 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
28577185 8283 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
f1410647
ET
8284 cmd->port = PORT_TP;
8285 break;
8286
c18487ee
YR
8287 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8288 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8289 bp->link_params.ext_phy_config);
8290 break;
8291
f1410647
ET
8292 default:
8293 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
8294 bp->link_params.ext_phy_config);
8295 break;
f1410647
ET
8296 }
8297 } else
a2fbb9ea 8298 cmd->port = PORT_TP;
a2fbb9ea 8299
34f80b04 8300 cmd->phy_address = bp->port.phy_addr;
a2fbb9ea
ET
8301 cmd->transceiver = XCVR_INTERNAL;
8302
c18487ee 8303 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 8304 cmd->autoneg = AUTONEG_ENABLE;
f1410647 8305 else
a2fbb9ea 8306 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
8307
8308 cmd->maxtxpkt = 0;
8309 cmd->maxrxpkt = 0;
8310
8311 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8312 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8313 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8314 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8315 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8316 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8317 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8318
8319 return 0;
8320}
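
/* Worked example of the E1HMF bandwidth clamp in bnx2x_get_settings()
 * (hypothetical config): a FUNC_MF_CFG_MAX_BW field of 50 expands to
 * 50 * 100 = 5000 Mbps, so a function on a 10G link reports a speed
 * of 5000 through ethtool.
 */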
8321
8322static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8323{
8324 struct bnx2x *bp = netdev_priv(dev);
8325 u32 advertising;
8326
34f80b04
EG
8327 if (IS_E1HMF(bp))
8328 return 0;
8329
a2fbb9ea
ET
8330 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8331 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8332 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8333 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8334 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8335 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8336 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8337
a2fbb9ea 8338 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
8339 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8340 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 8341 return -EINVAL;
f1410647 8342 }
a2fbb9ea
ET
8343
8344 /* advertise the requested speed and duplex if supported */
34f80b04 8345 cmd->advertising &= bp->port.supported;
a2fbb9ea 8346
c18487ee
YR
8347 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8348 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
8349 bp->port.advertising |= (ADVERTISED_Autoneg |
8350 cmd->advertising);
a2fbb9ea
ET
8351
8352 } else { /* forced speed */
8353 /* advertise the requested speed and duplex if supported */
8354 switch (cmd->speed) {
8355 case SPEED_10:
8356 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8357 if (!(bp->port.supported &
f1410647
ET
8358 SUPPORTED_10baseT_Full)) {
8359 DP(NETIF_MSG_LINK,
8360 "10M full not supported\n");
a2fbb9ea 8361 return -EINVAL;
f1410647 8362 }
a2fbb9ea
ET
8363
8364 advertising = (ADVERTISED_10baseT_Full |
8365 ADVERTISED_TP);
8366 } else {
34f80b04 8367 if (!(bp->port.supported &
f1410647
ET
8368 SUPPORTED_10baseT_Half)) {
8369 DP(NETIF_MSG_LINK,
8370 "10M half not supported\n");
a2fbb9ea 8371 return -EINVAL;
f1410647 8372 }
a2fbb9ea
ET
8373
8374 advertising = (ADVERTISED_10baseT_Half |
8375 ADVERTISED_TP);
8376 }
8377 break;
8378
8379 case SPEED_100:
8380 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8381 if (!(bp->port.supported &
f1410647
ET
8382 SUPPORTED_100baseT_Full)) {
8383 DP(NETIF_MSG_LINK,
8384 "100M full not supported\n");
a2fbb9ea 8385 return -EINVAL;
f1410647 8386 }
8387
8388 advertising = (ADVERTISED_100baseT_Full |
8389 ADVERTISED_TP);
8390 } else {
34f80b04 8391 if (!(bp->port.supported &
8392 SUPPORTED_100baseT_Half)) {
8393 DP(NETIF_MSG_LINK,
8394 "100M half not supported\n");
a2fbb9ea 8395 return -EINVAL;
f1410647 8396 }
8397
8398 advertising = (ADVERTISED_100baseT_Half |
8399 ADVERTISED_TP);
8400 }
8401 break;
8402
8403 case SPEED_1000:
8404 if (cmd->duplex != DUPLEX_FULL) {
8405 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 8406 return -EINVAL;
f1410647 8407 }
a2fbb9ea 8408
34f80b04 8409 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 8410 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 8411 return -EINVAL;
f1410647 8412 }
8413
8414 advertising = (ADVERTISED_1000baseT_Full |
8415 ADVERTISED_TP);
8416 break;
8417
8418 case SPEED_2500:
8419 if (cmd->duplex != DUPLEX_FULL) {
8420 DP(NETIF_MSG_LINK,
8421 "2.5G half not supported\n");
a2fbb9ea 8422 return -EINVAL;
f1410647 8423 }
a2fbb9ea 8424
34f80b04 8425 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
8426 DP(NETIF_MSG_LINK,
8427 "2.5G full not supported\n");
a2fbb9ea 8428 return -EINVAL;
f1410647 8429 }
a2fbb9ea 8430
f1410647 8431 advertising = (ADVERTISED_2500baseX_Full |
8432 ADVERTISED_TP);
8433 break;
8434
8435 case SPEED_10000:
8436 if (cmd->duplex != DUPLEX_FULL) {
8437 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 8438 return -EINVAL;
f1410647 8439 }
a2fbb9ea 8440
34f80b04 8441 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 8442 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 8443 return -EINVAL;
f1410647 8444 }
8445
8446 advertising = (ADVERTISED_10000baseT_Full |
8447 ADVERTISED_FIBRE);
8448 break;
8449
8450 default:
f1410647 8451 DP(NETIF_MSG_LINK, "Unsupported speed\n");
8452 return -EINVAL;
8453 }
8454
8455 bp->link_params.req_line_speed = cmd->speed;
8456 bp->link_params.req_duplex = cmd->duplex;
34f80b04 8457 bp->port.advertising = advertising;
8458 }
8459
c18487ee 8460 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 8461 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 8462 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 8463 bp->port.advertising);
a2fbb9ea 8464
34f80b04 8465 if (netif_running(dev)) {
bb2a0f7a 8466 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8467 bnx2x_link_set(bp);
8468 }
8469
8470 return 0;
8471}
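/* Illustrative user-space sketch (not part of this driver): the two
 * handlers above are reached through the SIOCETHTOOL ioctl with the
 * ETHTOOL_GSET/ETHTOOL_SSET sub-commands.  "eth0" is a placeholder
 * interface name and error handling is trimmed for brevity; kept inside
 * #if 0 so it does not affect the kernel build. */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct ifreq ifr;
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ecmd;

	/* lands in bnx2x_get_settings() for a bnx2x netdev */
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("speed %u duplex %u autoneg %u port %u\n",
		       ecmd.speed, ecmd.duplex, ecmd.autoneg, ecmd.port);
	return 0;
}
#endif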
8472
8473#define PHY_FW_VER_LEN 10
8474
8475static void bnx2x_get_drvinfo(struct net_device *dev,
8476 struct ethtool_drvinfo *info)
8477{
8478 struct bnx2x *bp = netdev_priv(dev);
f0e53a84 8479 u8 phy_fw_ver[PHY_FW_VER_LEN];
8480
8481 strcpy(info->driver, DRV_MODULE_NAME);
8482 strcpy(info->version, DRV_MODULE_VERSION);
8483
8484 phy_fw_ver[0] = '\0';
34f80b04 8485 if (bp->port.pmf) {
4a37fb66 8486 bnx2x_acquire_phy_lock(bp);
8487 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8488 (bp->state != BNX2X_STATE_CLOSED),
8489 phy_fw_ver, PHY_FW_VER_LEN);
4a37fb66 8490 bnx2x_release_phy_lock(bp);
34f80b04 8491 }
c18487ee 8492
8493 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8494 (bp->common.bc_ver & 0xff0000) >> 16,
8495 (bp->common.bc_ver & 0xff00) >> 8,
8496 (bp->common.bc_ver & 0xff),
8497 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
8498 strcpy(info->bus_info, pci_name(bp->pdev));
8499 info->n_stats = BNX2X_NUM_STATS;
8500 info->testinfo_len = BNX2X_NUM_TESTS;
34f80b04 8501 info->eedump_len = bp->common.flash_size;
8502 info->regdump_len = 0;
8503}
8504
8505static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8506{
8507 struct bnx2x *bp = netdev_priv(dev);
8508
8509 if (bp->flags & NO_WOL_FLAG) {
8510 wol->supported = 0;
8511 wol->wolopts = 0;
8512 } else {
8513 wol->supported = WAKE_MAGIC;
8514 if (bp->wol)
8515 wol->wolopts = WAKE_MAGIC;
8516 else
8517 wol->wolopts = 0;
8518 }
8519 memset(&wol->sopass, 0, sizeof(wol->sopass));
8520}
8521
8522static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8523{
8524 struct bnx2x *bp = netdev_priv(dev);
8525
8526 if (wol->wolopts & ~WAKE_MAGIC)
8527 return -EINVAL;
8528
8529 if (wol->wolopts & WAKE_MAGIC) {
8530 if (bp->flags & NO_WOL_FLAG)
8531 return -EINVAL;
8532
8533 bp->wol = 1;
34f80b04 8534 } else
a2fbb9ea 8535 bp->wol = 0;
34f80b04 8536
8537 return 0;
8538}
8539
8540static u32 bnx2x_get_msglevel(struct net_device *dev)
8541{
8542 struct bnx2x *bp = netdev_priv(dev);
8543
8544 return bp->msglevel;
8545}
8546
8547static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8548{
8549 struct bnx2x *bp = netdev_priv(dev);
8550
8551 if (capable(CAP_NET_ADMIN))
8552 bp->msglevel = level;
8553}
8554
8555static int bnx2x_nway_reset(struct net_device *dev)
8556{
8557 struct bnx2x *bp = netdev_priv(dev);
8558
8559 if (!bp->port.pmf)
8560 return 0;
a2fbb9ea 8561
34f80b04 8562 if (netif_running(dev)) {
bb2a0f7a 8563 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8564 bnx2x_link_set(bp);
8565 }
8566
8567 return 0;
8568}
8569
8570static int bnx2x_get_eeprom_len(struct net_device *dev)
8571{
8572 struct bnx2x *bp = netdev_priv(dev);
8573
34f80b04 8574 return bp->common.flash_size;
8575}
8576
8577static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8578{
34f80b04 8579 int port = BP_PORT(bp);
8580 int count, i;
8581 u32 val = 0;
8582
8583 /* adjust timeout for emulation/FPGA */
8584 count = NVRAM_TIMEOUT_COUNT;
8585 if (CHIP_REV_IS_SLOW(bp))
8586 count *= 100;
8587
8588 /* request access to nvram interface */
8589 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8590 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8591
8592 for (i = 0; i < count*10; i++) {
8593 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8594 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8595 break;
8596
8597 udelay(5);
8598 }
8599
8600 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 8601 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
8602 return -EBUSY;
8603 }
8604
8605 return 0;
8606}
8607
8608static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8609{
34f80b04 8610 int port = BP_PORT(bp);
8611 int count, i;
8612 u32 val = 0;
8613
8614 /* adjust timeout for emulation/FPGA */
8615 count = NVRAM_TIMEOUT_COUNT;
8616 if (CHIP_REV_IS_SLOW(bp))
8617 count *= 100;
8618
8619 /* relinquish nvram interface */
8620 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8621 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8622
8623 for (i = 0; i < count*10; i++) {
8624 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8625 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8626 break;
8627
8628 udelay(5);
8629 }
8630
8631 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 8632 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8633 return -EBUSY;
8634 }
8635
8636 return 0;
8637}
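/* A minimal sketch (not in the driver): the acquire/release pair above is
 * a poll-for-bit handshake against the MCP's NVRAM arbiter.  Assuming the
 * REG_RD() accessor and udelay() used throughout this file, a shared
 * helper could look like this ("expect_set" picks the polarity): */
#if 0
static int bnx2x_nvram_poll_arb(struct bnx2x *bp, u32 bit, int expect_set,
				int count)
{
	int i;

	for (i = 0; i < count * 10; i++) {
		u32 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);

		/* done once the arbiter bit reaches the expected state */
		if (!!(val & bit) == !!expect_set)
			return 0;
		udelay(5);
	}
	return -EBUSY;
}
#endif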
8638
8639static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8640{
8641 u32 val;
8642
8643 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8644
8645 /* enable both bits, even on read */
8646 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8647 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8648 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8649}
8650
8651static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8652{
8653 u32 val;
8654
8655 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8656
8657 /* disable both bits, even after read */
8658 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8659 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8660 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8661}
8662
4781bfad 8663static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
8664 u32 cmd_flags)
8665{
f1410647 8666 int count, i, rc;
8667 u32 val;
8668
8669 /* build the command word */
8670 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8671
8672 /* need to clear DONE bit separately */
8673 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8674
8675 /* address of the NVRAM to read from */
8676 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8677 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8678
8679 /* issue a read command */
8680 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8681
8682 /* adjust timeout for emulation/FPGA */
8683 count = NVRAM_TIMEOUT_COUNT;
8684 if (CHIP_REV_IS_SLOW(bp))
8685 count *= 100;
8686
8687 /* wait for completion */
8688 *ret_val = 0;
8689 rc = -EBUSY;
8690 for (i = 0; i < count; i++) {
8691 udelay(5);
8692 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8693
8694 if (val & MCPR_NVM_COMMAND_DONE) {
8695 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8696 /* we read nvram data in cpu order,
8697 * but ethtool sees it as an array of bytes;
8698 * converting to big-endian does the work */
4781bfad 8699 *ret_val = cpu_to_be32(val);
8700 rc = 0;
8701 break;
8702 }
8703 }
8704
8705 return rc;
8706}
8707
8708static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8709 int buf_size)
8710{
8711 int rc;
8712 u32 cmd_flags;
4781bfad 8713 __be32 val;
8714
8715 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8716 DP(BNX2X_MSG_NVM,
c14423fe 8717 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8718 offset, buf_size);
8719 return -EINVAL;
8720 }
8721
8722 if (offset + buf_size > bp->common.flash_size) {
8723 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8724 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8725 offset, buf_size, bp->common.flash_size);
8726 return -EINVAL;
8727 }
8728
8729 /* request access to nvram interface */
8730 rc = bnx2x_acquire_nvram_lock(bp);
8731 if (rc)
8732 return rc;
8733
8734 /* enable access to nvram interface */
8735 bnx2x_enable_nvram_access(bp);
8736
8737 /* read the first word(s) */
8738 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8739 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8740 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8741 memcpy(ret_buf, &val, 4);
8742
8743 /* advance to the next dword */
8744 offset += sizeof(u32);
8745 ret_buf += sizeof(u32);
8746 buf_size -= sizeof(u32);
8747 cmd_flags = 0;
8748 }
8749
8750 if (rc == 0) {
8751 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8752 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8753 memcpy(ret_buf, &val, 4);
8754 }
8755
8756 /* disable access to nvram interface */
8757 bnx2x_disable_nvram_access(bp);
8758 bnx2x_release_nvram_lock(bp);
8759
8760 return rc;
8761}
8762
8763static int bnx2x_get_eeprom(struct net_device *dev,
8764 struct ethtool_eeprom *eeprom, u8 *eebuf)
8765{
8766 struct bnx2x *bp = netdev_priv(dev);
8767 int rc;
8768
8769 if (!netif_running(dev))
8770 return -EAGAIN;
8771
34f80b04 8772 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8773 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8774 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8775 eeprom->len, eeprom->len);
8776
8777 /* parameters already validated in ethtool_get_eeprom */
8778
8779 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8780
8781 return rc;
8782}
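/* Illustrative user-space sketch (not part of this driver): NVRAM can be
 * dumped through ETHTOOL_GEEPROM, which lands in bnx2x_get_eeprom() above.
 * The socket/ifreq setup is the same as in the earlier ETHTOOL_GSET
 * sketch; the 16-byte length is an arbitrary example. */
#if 0
static int read_nvram_sample(int fd, struct ifreq *ifr)
{
	struct {
		struct ethtool_eeprom ee;
		__u8 data[16];
	} req = { .ee = { .cmd = ETHTOOL_GEEPROM, .offset = 0, .len = 16 } };

	ifr->ifr_data = (void *)&req.ee;
	return ioctl(fd, SIOCETHTOOL, ifr);
}
#endif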
8783
8784static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8785 u32 cmd_flags)
8786{
f1410647 8787 int count, i, rc;
8788
8789 /* build the command word */
8790 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8791
8792 /* need to clear DONE bit separately */
8793 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8794
8795 /* write the data */
8796 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8797
8798 /* address of the NVRAM to write to */
8799 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8800 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8801
8802 /* issue the write command */
8803 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8804
8805 /* adjust timeout for emulation/FPGA */
8806 count = NVRAM_TIMEOUT_COUNT;
8807 if (CHIP_REV_IS_SLOW(bp))
8808 count *= 100;
8809
8810 /* wait for completion */
8811 rc = -EBUSY;
8812 for (i = 0; i < count; i++) {
8813 udelay(5);
8814 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8815 if (val & MCPR_NVM_COMMAND_DONE) {
8816 rc = 0;
8817 break;
8818 }
8819 }
8820
8821 return rc;
8822}
8823
f1410647 8824#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
8825
8826static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8827 int buf_size)
8828{
8829 int rc;
8830 u32 cmd_flags;
8831 u32 align_offset;
4781bfad 8832 __be32 val;
a2fbb9ea 8833
8834 if (offset + buf_size > bp->common.flash_size) {
8835 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8836 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8837 offset, buf_size, bp->common.flash_size);
8838 return -EINVAL;
8839 }
8840
8841 /* request access to nvram interface */
8842 rc = bnx2x_acquire_nvram_lock(bp);
8843 if (rc)
8844 return rc;
8845
8846 /* enable access to nvram interface */
8847 bnx2x_enable_nvram_access(bp);
8848
8849 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8850 align_offset = (offset & ~0x03);
8851 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8852
8853 if (rc == 0) {
8854 val &= ~(0xff << BYTE_OFFSET(offset));
8855 val |= (*data_buf << BYTE_OFFSET(offset));
8856
8857 /* nvram data is returned as an array of bytes;
8858 * convert it back to cpu order */
8859 val = be32_to_cpu(val);
8860
8861 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8862 cmd_flags);
8863 }
8864
8865 /* disable access to nvram interface */
8866 bnx2x_disable_nvram_access(bp);
8867 bnx2x_release_nvram_lock(bp);
8868
8869 return rc;
8870}
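/* Worked example of the read-modify-write above (illustrative values):
 * writing one byte at offset 0x13 reads the dword at align_offset 0x10,
 * clears the byte lane selected by BYTE_OFFSET(0x13) == 8 * 3 == 24,
 * merges the new byte into that lane, converts the dword back to cpu
 * order and writes it out as a single aligned dword. */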
8871
8872static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8873 int buf_size)
8874{
8875 int rc;
8876 u32 cmd_flags;
8877 u32 val;
8878 u32 written_so_far;
8879
34f80b04 8880 if (buf_size == 1) /* ethtool */
a2fbb9ea 8881 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8882
8883 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8884 DP(BNX2X_MSG_NVM,
c14423fe 8885 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8886 offset, buf_size);
8887 return -EINVAL;
8888 }
8889
8890 if (offset + buf_size > bp->common.flash_size) {
8891 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8892 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8893 offset, buf_size, bp->common.flash_size);
8894 return -EINVAL;
8895 }
8896
8897 /* request access to nvram interface */
8898 rc = bnx2x_acquire_nvram_lock(bp);
8899 if (rc)
8900 return rc;
8901
8902 /* enable access to nvram interface */
8903 bnx2x_enable_nvram_access(bp);
8904
8905 written_so_far = 0;
8906 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8907 while ((written_so_far < buf_size) && (rc == 0)) {
8908 if (written_so_far == (buf_size - sizeof(u32)))
8909 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8910 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8911 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8912 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8913 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8914
8915 memcpy(&val, data_buf, 4);
8916
8917 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8918
8919 /* advance to the next dword */
8920 offset += sizeof(u32);
8921 data_buf += sizeof(u32);
8922 written_so_far += sizeof(u32);
8923 cmd_flags = 0;
8924 }
8925
8926 /* disable access to nvram interface */
8927 bnx2x_disable_nvram_access(bp);
8928 bnx2x_release_nvram_lock(bp);
8929
8930 return rc;
8931}
8932
8933static int bnx2x_set_eeprom(struct net_device *dev,
8934 struct ethtool_eeprom *eeprom, u8 *eebuf)
8935{
8936 struct bnx2x *bp = netdev_priv(dev);
8937 int rc;
8938
8939 if (!netif_running(dev))
8940 return -EAGAIN;
8941
34f80b04 8942 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8943 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8944 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8945 eeprom->len, eeprom->len);
8946
8947 /* parameters already validated in ethtool_set_eeprom */
8948
c18487ee 8949 /* If the magic number is PHY (0x00504859, i.e. "PHY" in ASCII), upgrade the PHY FW */
8950 if (eeprom->magic == 0x00504859)
8951 if (bp->port.pmf) {
8952
4a37fb66 8953 bnx2x_acquire_phy_lock(bp);
8954 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8955 bp->link_params.ext_phy_config,
8956 (bp->state != BNX2X_STATE_CLOSED),
8957 eebuf, eeprom->len);
8958 if ((bp->state == BNX2X_STATE_OPEN) ||
8959 (bp->state == BNX2X_STATE_DISABLED)) {
34f80b04 8960 rc |= bnx2x_link_reset(&bp->link_params,
589abe3a 8961 &bp->link_vars, 1);
8962 rc |= bnx2x_phy_init(&bp->link_params,
8963 &bp->link_vars);
bb2a0f7a 8964 }
4a37fb66 8965 bnx2x_release_phy_lock(bp);
8966
8967 } else /* Only the PMF can access the PHY */
8968 return -EINVAL;
8969 else
c18487ee 8970 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8971
8972 return rc;
8973}
8974
8975static int bnx2x_get_coalesce(struct net_device *dev,
8976 struct ethtool_coalesce *coal)
8977{
8978 struct bnx2x *bp = netdev_priv(dev);
8979
8980 memset(coal, 0, sizeof(struct ethtool_coalesce));
8981
8982 coal->rx_coalesce_usecs = bp->rx_ticks;
8983 coal->tx_coalesce_usecs = bp->tx_ticks;
8984
8985 return 0;
8986}
8987
8988static int bnx2x_set_coalesce(struct net_device *dev,
8989 struct ethtool_coalesce *coal)
8990{
8991 struct bnx2x *bp = netdev_priv(dev);
8992
8993 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8994 if (bp->rx_ticks > 3000)
8995 bp->rx_ticks = 3000;
8996
8997 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8998 if (bp->tx_ticks > 0x3000)
8999 bp->tx_ticks = 0x3000;
9000
34f80b04 9001 if (netif_running(dev))
9002 bnx2x_update_coalesce(bp);
9003
9004 return 0;
9005}
9006
9007static void bnx2x_get_ringparam(struct net_device *dev,
9008 struct ethtool_ringparam *ering)
9009{
9010 struct bnx2x *bp = netdev_priv(dev);
9011
9012 ering->rx_max_pending = MAX_RX_AVAIL;
9013 ering->rx_mini_max_pending = 0;
9014 ering->rx_jumbo_max_pending = 0;
9015
9016 ering->rx_pending = bp->rx_ring_size;
9017 ering->rx_mini_pending = 0;
9018 ering->rx_jumbo_pending = 0;
9019
9020 ering->tx_max_pending = MAX_TX_AVAIL;
9021 ering->tx_pending = bp->tx_ring_size;
9022}
9023
9024static int bnx2x_set_ringparam(struct net_device *dev,
9025 struct ethtool_ringparam *ering)
9026{
9027 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9028 int rc = 0;
9029
9030 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9031 (ering->tx_pending > MAX_TX_AVAIL) ||
9032 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9033 return -EINVAL;
9034
9035 bp->rx_ring_size = ering->rx_pending;
9036 bp->tx_ring_size = ering->tx_pending;
9037
9038 if (netif_running(dev)) {
9039 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9040 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9041 }
9042
34f80b04 9043 return rc;
9044}
9045
9046static void bnx2x_get_pauseparam(struct net_device *dev,
9047 struct ethtool_pauseparam *epause)
9048{
9049 struct bnx2x *bp = netdev_priv(dev);
9050
9051 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9052 BNX2X_FLOW_CTRL_AUTO) &&
9053 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9054
9055 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9056 BNX2X_FLOW_CTRL_RX);
9057 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9058 BNX2X_FLOW_CTRL_TX);
9059
9060 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9061 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9062 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9063}
9064
9065static int bnx2x_set_pauseparam(struct net_device *dev,
9066 struct ethtool_pauseparam *epause)
9067{
9068 struct bnx2x *bp = netdev_priv(dev);
9069
9070 if (IS_E1HMF(bp))
9071 return 0;
9072
9073 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9074 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9075 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9076
c0700f90 9077 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
a2fbb9ea 9078
f1410647 9079 if (epause->rx_pause)
c0700f90 9080 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
c18487ee 9081
f1410647 9082 if (epause->tx_pause)
c0700f90 9083 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
c18487ee 9084
9085 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9086 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 9087
c18487ee 9088 if (epause->autoneg) {
34f80b04 9089 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
3196a88a 9090 DP(NETIF_MSG_LINK, "autoneg not supported\n");
9091 return -EINVAL;
9092 }
a2fbb9ea 9093
c18487ee 9094 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
c0700f90 9095 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
c18487ee 9096 }
a2fbb9ea 9097
9098 DP(NETIF_MSG_LINK,
9099 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9100
9101 if (netif_running(dev)) {
bb2a0f7a 9102 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9103 bnx2x_link_set(bp);
9104 }
9105
9106 return 0;
9107}
9108
9109static int bnx2x_set_flags(struct net_device *dev, u32 data)
9110{
9111 struct bnx2x *bp = netdev_priv(dev);
9112 int changed = 0;
9113 int rc = 0;
9114
9115 /* TPA requires Rx CSUM offloading */
9116 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9117 if (!(dev->features & NETIF_F_LRO)) {
9118 dev->features |= NETIF_F_LRO;
9119 bp->flags |= TPA_ENABLE_FLAG;
9120 changed = 1;
9121 }
9122
9123 } else if (dev->features & NETIF_F_LRO) {
9124 dev->features &= ~NETIF_F_LRO;
9125 bp->flags &= ~TPA_ENABLE_FLAG;
9126 changed = 1;
9127 }
9128
9129 if (changed && netif_running(dev)) {
9130 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9131 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9132 }
9133
9134 return rc;
9135}
9136
9137static u32 bnx2x_get_rx_csum(struct net_device *dev)
9138{
9139 struct bnx2x *bp = netdev_priv(dev);
9140
9141 return bp->rx_csum;
9142}
9143
9144static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9145{
9146 struct bnx2x *bp = netdev_priv(dev);
df0f2343 9147 int rc = 0;
9148
9149 bp->rx_csum = data;
9150
9151 /* Disable TPA when Rx CSUM is disabled; otherwise all
9152 TPA'ed packets will be discarded due to a wrong TCP CSUM */
9153 if (!data) {
9154 u32 flags = ethtool_op_get_flags(dev);
9155
9156 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9157 }
9158
9159 return rc;
9160}
9161
9162static int bnx2x_set_tso(struct net_device *dev, u32 data)
9163{
755735eb 9164 if (data) {
a2fbb9ea 9165 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9166 dev->features |= NETIF_F_TSO6;
9167 } else {
a2fbb9ea 9168 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9169 dev->features &= ~NETIF_F_TSO6;
9170 }
9171
9172 return 0;
9173}
9174
f3c87cdd 9175static const struct {
9176 char string[ETH_GSTRING_LEN];
9177} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9178 { "register_test (offline)" },
9179 { "memory_test (offline)" },
9180 { "loopback_test (offline)" },
9181 { "nvram_test (online)" },
9182 { "interrupt_test (online)" },
9183 { "link_test (online)" },
d3d4f495 9184 { "idle check (online)" }
9185};
9186
9187static int bnx2x_self_test_count(struct net_device *dev)
9188{
9189 return BNX2X_NUM_TESTS;
9190}
9191
9192static int bnx2x_test_registers(struct bnx2x *bp)
9193{
9194 int idx, i, rc = -ENODEV;
9195 u32 wr_val = 0;
9dabc424 9196 int port = BP_PORT(bp);
9197 static const struct {
9198 u32 offset0;
9199 u32 offset1;
9200 u32 mask;
9201 } reg_tbl[] = {
9202/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
9203 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
9204 { HC_REG_AGG_INT_0, 4, 0x000003ff },
9205 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
9206 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
9207 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
9208 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
9209 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9210 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
9211 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9212/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
9213 { QM_REG_CONNNUM_0, 4, 0x000fffff },
9214 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
9215 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
9216 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
9217 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9218 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
9219 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
9220 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
9221 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
9222/* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
9223 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
9224 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
9225 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
9226 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
9227 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
9228 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
9229 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
9230 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
9231 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
9232/* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
9233 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
9234 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
9235 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
9236 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9237 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
9238 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9239 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
9240
9241 { 0xffffffff, 0, 0x00000000 }
9242 };
9243
9244 if (!netif_running(bp->dev))
9245 return rc;
9246
9247 /* Run the test twice:
9248 first writing 0x00000000, then writing 0xffffffff */
9249 for (idx = 0; idx < 2; idx++) {
9250
9251 switch (idx) {
9252 case 0:
9253 wr_val = 0;
9254 break;
9255 case 1:
9256 wr_val = 0xffffffff;
9257 break;
9258 }
9259
9260 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9261 u32 offset, mask, save_val, val;
9262
9263 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9264 mask = reg_tbl[i].mask;
9265
9266 save_val = REG_RD(bp, offset);
9267
9268 REG_WR(bp, offset, wr_val);
9269 val = REG_RD(bp, offset);
9270
9271 /* Restore the original register's value */
9272 REG_WR(bp, offset, save_val);
9273
9274 /* verify that the value is as expected */
9275 if ((val & mask) != (wr_val & mask))
9276 goto test_reg_exit;
9277 }
9278 }
9279
9280 rc = 0;
9281
9282test_reg_exit:
9283 return rc;
9284}
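/* Note on the pattern above: writing all-zeros and then all-ones through
 * each register's implemented-bits mask catches both stuck-at-1 and
 * stuck-at-0 faults, while the save/restore of the original value keeps
 * the test non-destructive for the (still running) device. */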
9285
9286static int bnx2x_test_memory(struct bnx2x *bp)
9287{
9288 int i, j, rc = -ENODEV;
9289 u32 val;
9290 static const struct {
9291 u32 offset;
9292 int size;
9293 } mem_tbl[] = {
9294 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
9295 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9296 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
9297 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
9298 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
9299 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
9300 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
9301
9302 { 0xffffffff, 0 }
9303 };
9304 static const struct {
9305 char *name;
9306 u32 offset;
9307 u32 e1_mask;
9308 u32 e1h_mask;
f3c87cdd 9309 } prty_tbl[] = {
9310 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
9311 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
9312 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
9313 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
9314 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
9315 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
9316
9317 { NULL, 0xffffffff, 0, 0 }
9318 };
9319
9320 if (!netif_running(bp->dev))
9321 return rc;
9322
9323 /* Go through all the memories */
9324 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9325 for (j = 0; j < mem_tbl[i].size; j++)
9326 REG_RD(bp, mem_tbl[i].offset + j*4);
9327
9328 /* Check the parity status */
9329 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9330 val = REG_RD(bp, prty_tbl[i].offset);
9331 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9332 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9333 DP(NETIF_MSG_HW,
9334 "%s is 0x%x\n", prty_tbl[i].name, val);
9335 goto test_mem_exit;
9336 }
9337 }
9338
9339 rc = 0;
9340
9341test_mem_exit:
9342 return rc;
9343}
9344
9345static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9346{
9347 int cnt = 1000;
9348
9349 if (link_up)
9350 while (bnx2x_link_test(bp) && cnt--)
9351 msleep(10);
9352}
9353
9354static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9355{
9356 unsigned int pkt_size, num_pkts, i;
9357 struct sk_buff *skb;
9358 unsigned char *packet;
9359 struct bnx2x_fastpath *fp = &bp->fp[0];
9360 u16 tx_start_idx, tx_idx;
9361 u16 rx_start_idx, rx_idx;
9362 u16 pkt_prod;
9363 struct sw_tx_bd *tx_buf;
9364 struct eth_tx_bd *tx_bd;
9365 dma_addr_t mapping;
9366 union eth_rx_cqe *cqe;
9367 u8 cqe_fp_flags;
9368 struct sw_rx_bd *rx_buf;
9369 u16 len;
9370 int rc = -ENODEV;
9371
9372 /* check the loopback mode */
9373 switch (loopback_mode) {
9374 case BNX2X_PHY_LOOPBACK:
9375 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
9376 return -EINVAL;
9377 break;
9378 case BNX2X_MAC_LOOPBACK:
f3c87cdd 9379 bp->link_params.loopback_mode = LOOPBACK_BMAC;
f3c87cdd 9380 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068
EG
9381 break;
9382 default:
f3c87cdd 9383 return -EINVAL;
b5bf9068 9384 }
f3c87cdd 9385
9386 /* prepare the loopback packet */
9387 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9388 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
9389 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9390 if (!skb) {
9391 rc = -ENOMEM;
9392 goto test_loopback_exit;
9393 }
9394 packet = skb_put(skb, pkt_size);
9395 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9396 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
9397 for (i = ETH_HLEN; i < pkt_size; i++)
9398 packet[i] = (unsigned char) (i & 0xff);
9399
b5bf9068 9400 /* send the loopback packet */
9401 num_pkts = 0;
9402 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
9403 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
9404
9405 pkt_prod = fp->tx_pkt_prod++;
9406 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9407 tx_buf->first_bd = fp->tx_bd_prod;
9408 tx_buf->skb = skb;
9409
9410 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
9411 mapping = pci_map_single(bp->pdev, skb->data,
9412 skb_headlen(skb), PCI_DMA_TODEVICE);
9413 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9414 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9415 tx_bd->nbd = cpu_to_le16(1);
9416 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9417 tx_bd->vlan = cpu_to_le16(pkt_prod);
9418 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
9419 ETH_TX_BD_FLAGS_END_BD);
9420 tx_bd->general_data = ((UNICAST_ADDRESS <<
9421 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9422
9423 wmb();
9424
4781bfad 9425 le16_add_cpu(&fp->hw_tx_prods->bds_prod, 1);
f3c87cdd 9426 mb(); /* FW restriction: must not reorder writing nbd and packets */
4781bfad 9427 le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
0626b899 9428 DOORBELL(bp, fp->index, 0);
9429
9430 mmiowb();
9431
9432 num_pkts++;
9433 fp->tx_bd_prod++;
9434 bp->dev->trans_start = jiffies;
9435
9436 udelay(100);
9437
9438 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
9439 if (tx_idx != tx_start_idx + num_pkts)
9440 goto test_loopback_exit;
9441
9442 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
9443 if (rx_idx != rx_start_idx + num_pkts)
9444 goto test_loopback_exit;
9445
9446 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
9447 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9448 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9449 goto test_loopback_rx_exit;
9450
9451 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9452 if (len != pkt_size)
9453 goto test_loopback_rx_exit;
9454
9455 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
9456 skb = rx_buf->skb;
9457 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9458 for (i = ETH_HLEN; i < pkt_size; i++)
9459 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9460 goto test_loopback_rx_exit;
9461
9462 rc = 0;
9463
9464test_loopback_rx_exit:
9465
9466 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
9467 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
9468 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
9469 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
9470
9471 /* Update producers */
9472 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
9473 fp->rx_sge_prod);
9474
9475test_loopback_exit:
9476 bp->link_params.loopback_mode = LOOPBACK_NONE;
9477
9478 return rc;
9479}
9480
9481static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9482{
b5bf9068 9483 int rc = 0, res;
9484
9485 if (!netif_running(bp->dev))
9486 return BNX2X_LOOPBACK_FAILED;
9487
f8ef6e44 9488 bnx2x_netif_stop(bp, 1);
3910c8ae 9489 bnx2x_acquire_phy_lock(bp);
f3c87cdd 9490
9491 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
9492 if (res) {
9493 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
9494 rc |= BNX2X_PHY_LOOPBACK_FAILED;
9495 }
9496
9497 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
9498 if (res) {
9499 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
9500 rc |= BNX2X_MAC_LOOPBACK_FAILED;
9501 }
9502
3910c8ae 9503 bnx2x_release_phy_lock(bp);
9504 bnx2x_netif_start(bp);
9505
9506 return rc;
9507}
9508
9509#define CRC32_RESIDUAL 0xdebb20e3
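/* 0xdebb20e3 is the standard CRC-32 residual: running the CRC over a
 * block that ends with its own (complemented, little-endian) CRC always
 * yields this constant, so each nvram_tbl[] region below can be verified
 * without knowing where its checksum field actually sits. */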
9510
9511static int bnx2x_test_nvram(struct bnx2x *bp)
9512{
9513 static const struct {
9514 int offset;
9515 int size;
9516 } nvram_tbl[] = {
9517 { 0, 0x14 }, /* bootstrap */
9518 { 0x14, 0xec }, /* dir */
9519 { 0x100, 0x350 }, /* manuf_info */
9520 { 0x450, 0xf0 }, /* feature_info */
9521 { 0x640, 0x64 }, /* upgrade_key_info */
9522 { 0x6a4, 0x64 },
9523 { 0x708, 0x70 }, /* manuf_key_info */
9524 { 0x778, 0x70 },
9525 { 0, 0 }
9526 };
4781bfad 9527 __be32 buf[0x350 / 4];
9528 u8 *data = (u8 *)buf;
9529 int i, rc;
9530 u32 magic, csum;
9531
9532 rc = bnx2x_nvram_read(bp, 0, data, 4);
9533 if (rc) {
f5372251 9534 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
9535 goto test_nvram_exit;
9536 }
9537
9538 magic = be32_to_cpu(buf[0]);
9539 if (magic != 0x669955aa) {
9540 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9541 rc = -ENODEV;
9542 goto test_nvram_exit;
9543 }
9544
9545 for (i = 0; nvram_tbl[i].size; i++) {
9546
9547 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9548 nvram_tbl[i].size);
9549 if (rc) {
9550 DP(NETIF_MSG_PROBE,
f5372251 9551 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
9552 goto test_nvram_exit;
9553 }
9554
9555 csum = ether_crc_le(nvram_tbl[i].size, data);
9556 if (csum != CRC32_RESIDUAL) {
9557 DP(NETIF_MSG_PROBE,
9558 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9559 rc = -ENODEV;
9560 goto test_nvram_exit;
9561 }
9562 }
9563
9564test_nvram_exit:
9565 return rc;
9566}
9567
9568static int bnx2x_test_intr(struct bnx2x *bp)
9569{
9570 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9571 int i, rc;
9572
9573 if (!netif_running(bp->dev))
9574 return -ENODEV;
9575
8d9c5f34 9576 config->hdr.length = 0;
9577 if (CHIP_IS_E1(bp))
9578 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9579 else
9580 config->hdr.offset = BP_FUNC(bp);
0626b899 9581 config->hdr.client_id = bp->fp->cl_id;
9582 config->hdr.reserved1 = 0;
9583
9584 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9585 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9586 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9587 if (rc == 0) {
9588 bp->set_mac_pending++;
9589 for (i = 0; i < 10; i++) {
9590 if (!bp->set_mac_pending)
9591 break;
9592 msleep_interruptible(10);
9593 }
9594 if (i == 10)
9595 rc = -ENODEV;
9596 }
9597
9598 return rc;
9599}
9600
9601static void bnx2x_self_test(struct net_device *dev,
9602 struct ethtool_test *etest, u64 *buf)
9603{
9604 struct bnx2x *bp = netdev_priv(dev);
9605
9606 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9607
f3c87cdd 9608 if (!netif_running(dev))
a2fbb9ea 9609 return;
a2fbb9ea 9610
33471629 9611 /* offline tests are not supported in MF mode */
9612 if (IS_E1HMF(bp))
9613 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9614
9615 if (etest->flags & ETH_TEST_FL_OFFLINE) {
9616 u8 link_up;
9617
9618 link_up = bp->link_vars.link_up;
9619 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9620 bnx2x_nic_load(bp, LOAD_DIAG);
9621 /* wait until link state is restored */
9622 bnx2x_wait_for_link(bp, link_up);
9623
9624 if (bnx2x_test_registers(bp) != 0) {
9625 buf[0] = 1;
9626 etest->flags |= ETH_TEST_FL_FAILED;
9627 }
9628 if (bnx2x_test_memory(bp) != 0) {
9629 buf[1] = 1;
9630 etest->flags |= ETH_TEST_FL_FAILED;
9631 }
9632 buf[2] = bnx2x_test_loopback(bp, link_up);
9633 if (buf[2] != 0)
9634 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 9635
9636 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9637 bnx2x_nic_load(bp, LOAD_NORMAL);
9638 /* wait until link state is restored */
9639 bnx2x_wait_for_link(bp, link_up);
9640 }
9641 if (bnx2x_test_nvram(bp) != 0) {
9642 buf[3] = 1;
9643 etest->flags |= ETH_TEST_FL_FAILED;
9644 }
9645 if (bnx2x_test_intr(bp) != 0) {
9646 buf[4] = 1;
9647 etest->flags |= ETH_TEST_FL_FAILED;
9648 }
9649 if (bp->port.pmf)
9650 if (bnx2x_link_test(bp) != 0) {
9651 buf[5] = 1;
9652 etest->flags |= ETH_TEST_FL_FAILED;
9653 }
9654
9655#ifdef BNX2X_EXTRA_DEBUG
9656 bnx2x_panic_dump(bp);
9657#endif
9658}
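/* Illustrative user-space sketch (not part of this driver): the self-test
 * above is driven by ETHTOOL_TEST.  The result count must match what
 * ETHTOOL_GDRVINFO reports in testinfo_len (BNX2X_NUM_TESTS here, i.e.
 * the seven entries of bnx2x_tests_str_arr[]); on return,
 * req.test.flags & ETH_TEST_FL_FAILED signals failure. */
#if 0
static int run_selftest(int fd, struct ifreq *ifr, int offline)
{
	struct {
		struct ethtool_test test;
		__u64 results[7];	/* testinfo_len from ETHTOOL_GDRVINFO */
	} req = { .test = { .cmd = ETHTOOL_TEST,
			    .flags = offline ? ETH_TEST_FL_OFFLINE : 0 } };

	ifr->ifr_data = (void *)&req.test;
	return ioctl(fd, SIOCETHTOOL, ifr);
}
#endif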
9659
9660static const struct {
9661 long offset;
9662 int size;
9663 u8 string[ETH_GSTRING_LEN];
9664} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
9665/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
9666 { Q_STATS_OFFSET32(error_bytes_received_hi),
9667 8, "[%d]: rx_error_bytes" },
9668 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
9669 8, "[%d]: rx_ucast_packets" },
9670 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
9671 8, "[%d]: rx_mcast_packets" },
9672 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
9673 8, "[%d]: rx_bcast_packets" },
9674 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
9675 { Q_STATS_OFFSET32(rx_err_discard_pkt),
9676 4, "[%d]: rx_phy_ip_err_discards"},
9677 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
9678 4, "[%d]: rx_skb_alloc_discard" },
9679 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
9680
9681/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
9682 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9683 8, "[%d]: tx_packets" }
9684};
9685
9686static const struct {
9687 long offset;
9688 int size;
9689 u32 flags;
9690#define STATS_FLAGS_PORT 1
9691#define STATS_FLAGS_FUNC 2
de832a55 9692#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
66e855f3 9693 u8 string[ETH_GSTRING_LEN];
bb2a0f7a 9694} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9695/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9696 8, STATS_FLAGS_BOTH, "rx_bytes" },
66e855f3 9697 { STATS_OFFSET32(error_bytes_received_hi),
de832a55 9698 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
bb2a0f7a 9699 { STATS_OFFSET32(total_unicast_packets_received_hi),
de832a55 9700 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
bb2a0f7a 9701 { STATS_OFFSET32(total_multicast_packets_received_hi),
de832a55 9702 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
bb2a0f7a 9703 { STATS_OFFSET32(total_broadcast_packets_received_hi),
de832a55 9704 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
bb2a0f7a 9705 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
66e855f3 9706 8, STATS_FLAGS_PORT, "rx_crc_errors" },
bb2a0f7a 9707 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
66e855f3 9708 8, STATS_FLAGS_PORT, "rx_align_errors" },
9709 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9710 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9711 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9712 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9713/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9714 8, STATS_FLAGS_PORT, "rx_fragments" },
9715 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9716 8, STATS_FLAGS_PORT, "rx_jabbers" },
9717 { STATS_OFFSET32(no_buff_discard_hi),
9718 8, STATS_FLAGS_BOTH, "rx_discards" },
9719 { STATS_OFFSET32(mac_filter_discard),
9720 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9721 { STATS_OFFSET32(xxoverflow_discard),
9722 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9723 { STATS_OFFSET32(brb_drop_hi),
9724 8, STATS_FLAGS_PORT, "rx_brb_discard" },
9725 { STATS_OFFSET32(brb_truncate_hi),
9726 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
9727 { STATS_OFFSET32(pause_frames_received_hi),
9728 8, STATS_FLAGS_PORT, "rx_pause_frames" },
9729 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9730 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9731 { STATS_OFFSET32(nig_timer_max),
9732 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
9733/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
9734 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
9735 { STATS_OFFSET32(rx_skb_alloc_failed),
9736 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
9737 { STATS_OFFSET32(hw_csum_err),
9738 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
9739
9740 { STATS_OFFSET32(total_bytes_transmitted_hi),
9741 8, STATS_FLAGS_BOTH, "tx_bytes" },
9742 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9743 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9744 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9745 8, STATS_FLAGS_BOTH, "tx_packets" },
9746 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9747 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9748 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9749 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
bb2a0f7a 9750 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
66e855f3 9751 8, STATS_FLAGS_PORT, "tx_single_collisions" },
bb2a0f7a 9752 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
66e855f3 9753 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
de832a55 9754/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
66e855f3 9755 8, STATS_FLAGS_PORT, "tx_deferred" },
bb2a0f7a 9756 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
66e855f3 9757 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
bb2a0f7a 9758 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
66e855f3 9759 8, STATS_FLAGS_PORT, "tx_late_collisions" },
bb2a0f7a 9760 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
66e855f3 9761 8, STATS_FLAGS_PORT, "tx_total_collisions" },
bb2a0f7a 9762 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
66e855f3 9763 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
bb2a0f7a 9764 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
66e855f3 9765 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
bb2a0f7a 9766 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
66e855f3 9767 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
bb2a0f7a 9768 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
66e855f3 9769 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
bb2a0f7a 9770 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
66e855f3 9771 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
bb2a0f7a 9772 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
66e855f3 9773 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
de832a55 9774/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
66e855f3 9775 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9776 { STATS_OFFSET32(pause_frames_sent_hi),
9777 8, STATS_FLAGS_PORT, "tx_pause_frames" }
9778};
9779
9780#define IS_PORT_STAT(i) \
9781 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
9782#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
9783#define IS_E1HMF_MODE_STAT(bp) \
9784 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
66e855f3 9785
9786static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9787{
bb2a0f7a 9788 struct bnx2x *bp = netdev_priv(dev);
de832a55 9789 int i, j, k;
bb2a0f7a 9790
9791 switch (stringset) {
9792 case ETH_SS_STATS:
9793 if (is_multi(bp)) {
9794 k = 0;
9795 for_each_queue(bp, i) {
9796 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
9797 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
9798 bnx2x_q_stats_arr[j].string, i);
9799 k += BNX2X_NUM_Q_STATS;
9800 }
9801 if (IS_E1HMF_MODE_STAT(bp))
9802 break;
9803 for (j = 0; j < BNX2X_NUM_STATS; j++)
9804 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
9805 bnx2x_stats_arr[j].string);
9806 } else {
9807 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9808 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9809 continue;
9810 strcpy(buf + j*ETH_GSTRING_LEN,
9811 bnx2x_stats_arr[i].string);
9812 j++;
9813 }
bb2a0f7a 9814 }
9815 break;
9816
9817 case ETH_SS_TEST:
9818 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9819 break;
9820 }
9821}
9822
9823static int bnx2x_get_stats_count(struct net_device *dev)
9824{
bb2a0f7a 9825 struct bnx2x *bp = netdev_priv(dev);
de832a55 9826 int i, num_stats;
bb2a0f7a 9827
9828 if (is_multi(bp)) {
9829 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
9830 if (!IS_E1HMF_MODE_STAT(bp))
9831 num_stats += BNX2X_NUM_STATS;
9832 } else {
9833 if (IS_E1HMF_MODE_STAT(bp)) {
9834 num_stats = 0;
9835 for (i = 0; i < BNX2X_NUM_STATS; i++)
9836 if (IS_FUNC_STAT(i))
9837 num_stats++;
9838 } else
9839 num_stats = BNX2X_NUM_STATS;
bb2a0f7a 9840 }
de832a55 9841
bb2a0f7a 9842 return num_stats;
9843}
9844
9845static void bnx2x_get_ethtool_stats(struct net_device *dev,
9846 struct ethtool_stats *stats, u64 *buf)
9847{
9848 struct bnx2x *bp = netdev_priv(dev);
9849 u32 *hw_stats, *offset;
9850 int i, j, k;
bb2a0f7a 9851
9852 if (is_multi(bp)) {
9853 k = 0;
9854 for_each_queue(bp, i) {
9855 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
9856 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
9857 if (bnx2x_q_stats_arr[j].size == 0) {
9858 /* skip this counter */
9859 buf[k + j] = 0;
9860 continue;
9861 }
9862 offset = (hw_stats +
9863 bnx2x_q_stats_arr[j].offset);
9864 if (bnx2x_q_stats_arr[j].size == 4) {
9865 /* 4-byte counter */
9866 buf[k + j] = (u64) *offset;
9867 continue;
9868 }
9869 /* 8-byte counter */
9870 buf[k + j] = HILO_U64(*offset, *(offset + 1));
9871 }
9872 k += BNX2X_NUM_Q_STATS;
9873 }
9874 if (IS_E1HMF_MODE_STAT(bp))
9875 return;
9876 hw_stats = (u32 *)&bp->eth_stats;
9877 for (j = 0; j < BNX2X_NUM_STATS; j++) {
9878 if (bnx2x_stats_arr[j].size == 0) {
9879 /* skip this counter */
9880 buf[k + j] = 0;
9881 continue;
9882 }
9883 offset = (hw_stats + bnx2x_stats_arr[j].offset);
9884 if (bnx2x_stats_arr[j].size == 4) {
9885 /* 4-byte counter */
9886 buf[k + j] = (u64) *offset;
9887 continue;
9888 }
9889 /* 8-byte counter */
9890 buf[k + j] = HILO_U64(*offset, *(offset + 1));
a2fbb9ea 9891 }
9892 } else {
9893 hw_stats = (u32 *)&bp->eth_stats;
9894 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9895 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9896 continue;
9897 if (bnx2x_stats_arr[i].size == 0) {
9898 /* skip this counter */
9899 buf[j] = 0;
9900 j++;
9901 continue;
9902 }
9903 offset = (hw_stats + bnx2x_stats_arr[i].offset);
9904 if (bnx2x_stats_arr[i].size == 4) {
9905 /* 4-byte counter */
9906 buf[j] = (u64) *offset;
9907 j++;
9908 continue;
9909 }
9910 /* 8-byte counter */
9911 buf[j] = HILO_U64(*offset, *(offset + 1));
bb2a0f7a 9912 j++;
a2fbb9ea 9913 }
9914 }
9915}
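/* Illustrative user-space sketch (not part of this driver): names and
 * values are fetched with matching indices, so data[i] from ETHTOOL_GSTATS
 * corresponds to string i from ETHTOOL_GSTRINGS.  n_stats comes from
 * ETHTOOL_GDRVINFO (see bnx2x_get_stats_count() above); needs <stdlib.h>
 * in addition to the includes from the earlier sketch. */
#if 0
static void dump_stats(int fd, struct ifreq *ifr, unsigned int n_stats)
{
	struct ethtool_gstrings *strs;
	struct ethtool_stats *vals;
	unsigned int i;

	strs = calloc(1, sizeof(*strs) + n_stats * ETH_GSTRING_LEN);
	vals = calloc(1, sizeof(*vals) + n_stats * sizeof(__u64));
	strs->cmd = ETHTOOL_GSTRINGS;
	strs->string_set = ETH_SS_STATS;
	vals->cmd = ETHTOOL_GSTATS;

	ifr->ifr_data = (void *)strs;
	ioctl(fd, SIOCETHTOOL, ifr);
	ifr->ifr_data = (void *)vals;
	ioctl(fd, SIOCETHTOOL, ifr);

	for (i = 0; i < n_stats; i++)
		printf("%.32s: %llu\n", strs->data + i * ETH_GSTRING_LEN,
		       (unsigned long long)vals->data[i]);

	free(strs);
	free(vals);
}
#endif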
9916
9917static int bnx2x_phys_id(struct net_device *dev, u32 data)
9918{
9919 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9920 int port = BP_PORT(bp);
9921 int i;
9922
9923 if (!netif_running(dev))
9924 return 0;
9925
9926 if (!bp->port.pmf)
9927 return 0;
9928
9929 if (data == 0)
9930 data = 2;
9931
9932 for (i = 0; i < (data * 2); i++) {
c18487ee 9933 if ((i % 2) == 0)
34f80b04 9934 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9935 bp->link_params.hw_led_mode,
9936 bp->link_params.chip_id);
9937 else
34f80b04 9938 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9939 bp->link_params.hw_led_mode,
9940 bp->link_params.chip_id);
9941
9942 msleep_interruptible(500);
9943 if (signal_pending(current))
9944 break;
9945 }
9946
c18487ee 9947 if (bp->link_vars.link_up)
34f80b04 9948 bnx2x_set_led(bp, port, LED_MODE_OPER,
9949 bp->link_vars.line_speed,
9950 bp->link_params.hw_led_mode,
9951 bp->link_params.chip_id);
9952
9953 return 0;
9954}
9955
9956static struct ethtool_ops bnx2x_ethtool_ops = {
9957 .get_settings = bnx2x_get_settings,
9958 .set_settings = bnx2x_set_settings,
9959 .get_drvinfo = bnx2x_get_drvinfo,
9960 .get_wol = bnx2x_get_wol,
9961 .set_wol = bnx2x_set_wol,
9962 .get_msglevel = bnx2x_get_msglevel,
9963 .set_msglevel = bnx2x_set_msglevel,
9964 .nway_reset = bnx2x_nway_reset,
9965 .get_link = ethtool_op_get_link,
9966 .get_eeprom_len = bnx2x_get_eeprom_len,
9967 .get_eeprom = bnx2x_get_eeprom,
9968 .set_eeprom = bnx2x_set_eeprom,
9969 .get_coalesce = bnx2x_get_coalesce,
9970 .set_coalesce = bnx2x_set_coalesce,
9971 .get_ringparam = bnx2x_get_ringparam,
9972 .set_ringparam = bnx2x_set_ringparam,
9973 .get_pauseparam = bnx2x_get_pauseparam,
9974 .set_pauseparam = bnx2x_set_pauseparam,
9975 .get_rx_csum = bnx2x_get_rx_csum,
9976 .set_rx_csum = bnx2x_set_rx_csum,
9977 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 9978 .set_tx_csum = ethtool_op_set_tx_hw_csum,
9979 .set_flags = bnx2x_set_flags,
9980 .get_flags = ethtool_op_get_flags,
9981 .get_sg = ethtool_op_get_sg,
9982 .set_sg = ethtool_op_set_sg,
9983 .get_tso = ethtool_op_get_tso,
9984 .set_tso = bnx2x_set_tso,
9985 .self_test_count = bnx2x_self_test_count,
9986 .self_test = bnx2x_self_test,
9987 .get_strings = bnx2x_get_strings,
9988 .phys_id = bnx2x_phys_id,
9989 .get_stats_count = bnx2x_get_stats_count,
bb2a0f7a 9990 .get_ethtool_stats = bnx2x_get_ethtool_stats,
9991};
9992
9993/* end of ethtool_ops */
9994
9995/****************************************************************************
9996* General service functions
9997****************************************************************************/
9998
9999static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10000{
10001 u16 pmcsr;
10002
10003 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10004
10005 switch (state) {
10006 case PCI_D0:
34f80b04 10007 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10008 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10009 PCI_PM_CTRL_PME_STATUS));
10010
10011 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
33471629 10012 /* delay required during transition out of D3hot */
a2fbb9ea 10013 msleep(20);
34f80b04 10014 break;
a2fbb9ea 10015
10016 case PCI_D3hot:
10017 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10018 pmcsr |= 3;
a2fbb9ea 10019
10020 if (bp->wol)
10021 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 10022
10023 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10024 pmcsr);
a2fbb9ea 10025
10026 /* No more memory access after this point until
10027 * device is brought back to D0.
10028 */
10029 break;
10030
10031 default:
10032 return -EINVAL;
10033 }
10034 return 0;
10035}
10036
10037static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10038{
10039 u16 rx_cons_sb;
10040
10041 /* Tell compiler that status block fields can change */
10042 barrier();
10043 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10044 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10045 rx_cons_sb++;
10046 return (fp->rx_comp_cons != rx_cons_sb);
10047}
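/* Note on the "++" above: the last descriptor of each RCQ page is a
 * next-page pointer rather than a real completion, so when the hardware
 * consumer index lands on that boundary entry it is advanced past the
 * placeholder before being compared with the driver's rx_comp_cons. */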
10048
10049/*
10050 * net_device service functions
10051 */
10052
10053static int bnx2x_poll(struct napi_struct *napi, int budget)
10054{
10055 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10056 napi);
10057 struct bnx2x *bp = fp->bp;
10058 int work_done = 0;
10059
10060#ifdef BNX2X_STOP_ON_ERROR
10061 if (unlikely(bp->panic))
34f80b04 10062 goto poll_panic;
10063#endif
10064
10065 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
10066 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10067 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10068
10069 bnx2x_update_fpsb_idx(fp);
10070
237907c1 10071 if (bnx2x_has_tx_work(fp))
10072 bnx2x_tx_int(fp, budget);
10073
237907c1 10074 if (bnx2x_has_rx_work(fp))
a2fbb9ea 10075 work_done = bnx2x_rx_int(fp, budget);
356e2385 10076
da5a662a 10077 rmb(); /* BNX2X_HAS_WORK() reads the status block */
10078
10079 /* must not complete if we consumed full budget */
da5a662a 10080 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
10081
10082#ifdef BNX2X_STOP_ON_ERROR
34f80b04 10083poll_panic:
a2fbb9ea 10084#endif
288379f0 10085 napi_complete(napi);
a2fbb9ea 10086
0626b899 10087 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
a2fbb9ea 10088 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
0626b899 10089 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10090 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10091 }
356e2385 10092
10093 return work_done;
10094}
10095
10096
10097/* we split the first BD into header and data BDs
10098 * to ease the pain of our fellow microcode engineers;
10099 * we use one mapping for both BDs.
10100 * So far this has only been observed to happen
10101 * in Other Operating Systems(TM)
10102 */
10103static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10104 struct bnx2x_fastpath *fp,
10105 struct eth_tx_bd **tx_bd, u16 hlen,
10106 u16 bd_prod, int nbd)
10107{
10108 struct eth_tx_bd *h_tx_bd = *tx_bd;
10109 struct eth_tx_bd *d_tx_bd;
10110 dma_addr_t mapping;
10111 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10112
10113 /* first fix first BD */
10114 h_tx_bd->nbd = cpu_to_le16(nbd);
10115 h_tx_bd->nbytes = cpu_to_le16(hlen);
10116
10117 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10118 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10119 h_tx_bd->addr_lo, h_tx_bd->nbd);
10120
10121 /* now get a new data BD
10122 * (after the pbd) and fill it */
10123 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10124 d_tx_bd = &fp->tx_desc_ring[bd_prod];
10125
10126 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10127 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10128
10129 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10130 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10131 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10132 d_tx_bd->vlan = 0;
10133 /* this marks the BD as one that has no individual mapping;
10134 * the FW ignores this flag in a BD not marked as a start BD
10135 */
10136 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
10137 DP(NETIF_MSG_TX_QUEUED,
10138 "TSO split data size is %d (%x:%x)\n",
10139 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10140
10141 /* update tx_bd for marking the last BD flag */
10142 *tx_bd = d_tx_bd;
10143
10144 return bd_prod;
10145}
10146
10147static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10148{
10149 if (fix > 0)
10150 csum = (u16) ~csum_fold(csum_sub(csum,
10151 csum_partial(t_header - fix, fix, 0)));
10152
10153 else if (fix < 0)
10154 csum = (u16) ~csum_fold(csum_add(csum,
10155 csum_partial(t_header, -fix, 0)));
10156
10157 return swab16(csum);
10158}
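/* What bnx2x_csum_fix() does, in checksum arithmetic terms: the hardware
 * computed a 1's-complement sum starting "fix" bytes away from the real
 * transport header, so the contribution of those bytes is subtracted
 * (fix > 0) or added back (fix < 0) with csum_partial(), the 32-bit sum
 * is folded to 16 bits, complemented, and byte-swapped into the order
 * expected downstream in the parse BD. */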
10159
10160static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10161{
10162 u32 rc;
10163
10164 if (skb->ip_summed != CHECKSUM_PARTIAL)
10165 rc = XMIT_PLAIN;
10166
10167 else {
4781bfad 10168 if (skb->protocol == htons(ETH_P_IPV6)) {
10169 rc = XMIT_CSUM_V6;
10170 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10171 rc |= XMIT_CSUM_TCP;
10172
10173 } else {
10174 rc = XMIT_CSUM_V4;
10175 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10176 rc |= XMIT_CSUM_TCP;
10177 }
10178 }
10179
10180 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10181 rc |= XMIT_GSO_V4;
10182
10183 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10184 rc |= XMIT_GSO_V6;
10185
10186 return rc;
10187}
10188
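/*
 * Summary of the classification above (driver flag names; the numeric
 * values are internal to bnx2x.h):
 *   ip_summed != CHECKSUM_PARTIAL    -> XMIT_PLAIN
 *   IPv4 csum offload                -> XMIT_CSUM_V4 (| XMIT_CSUM_TCP)
 *   IPv6 csum offload                -> XMIT_CSUM_V6 (| XMIT_CSUM_TCP)
 *   gso_type TCPV4 / TCPV6           -> | XMIT_GSO_V4 / XMIT_GSO_V6
 */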
632da4d6 10189#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
f5372251
EG
10190/* check if the packet requires linearization (packet too fragmented);
10191   no need to check fragmentation if page size > 8K (there will be no
10192   violation of FW restrictions) */
755735eb
EG
10193static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10194 u32 xmit_type)
10195{
10196 int to_copy = 0;
10197 int hlen = 0;
10198 int first_bd_sz = 0;
10199
10200 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10201 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10202
10203 if (xmit_type & XMIT_GSO) {
10204 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10205 /* Check if LSO packet needs to be copied:
10206 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10207 int wnd_size = MAX_FETCH_BD - 3;
33471629 10208 /* Number of windows to check */
755735eb
EG
10209 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10210 int wnd_idx = 0;
10211 int frag_idx = 0;
10212 u32 wnd_sum = 0;
10213
10214 /* Headers length */
10215 hlen = (int)(skb_transport_header(skb) - skb->data) +
10216 tcp_hdrlen(skb);
10217
10218			/* Amount of data (w/o headers) on the linear part of the SKB */
10219 first_bd_sz = skb_headlen(skb) - hlen;
10220
10221 wnd_sum = first_bd_sz;
10222
10223 /* Calculate the first sum - it's special */
10224 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10225 wnd_sum +=
10226 skb_shinfo(skb)->frags[frag_idx].size;
10227
10228			/* If there was data in the linear part of the skb - check it */
10229 if (first_bd_sz > 0) {
10230 if (unlikely(wnd_sum < lso_mss)) {
10231 to_copy = 1;
10232 goto exit_lbl;
10233 }
10234
10235 wnd_sum -= first_bd_sz;
10236 }
10237
10238 /* Others are easier: run through the frag list and
10239 check all windows */
10240 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10241 wnd_sum +=
10242 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10243
10244 if (unlikely(wnd_sum < lso_mss)) {
10245 to_copy = 1;
10246 break;
10247 }
10248 wnd_sum -=
10249 skb_shinfo(skb)->frags[wnd_idx].size;
10250 }
755735eb
EG
10251 } else {
10252			/* in the non-LSO case a too fragmented packet
10253			   should always be linearized */
10254 to_copy = 1;
10255 }
10256 }
10257
10258exit_lbl:
10259 if (unlikely(to_copy))
10260 DP(NETIF_MSG_TX_QUEUED,
10261 "Linearization IS REQUIRED for %s packet. "
10262 "num_frags %d hlen %d first_bd_sz %d\n",
10263 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10264 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10265
10266 return to_copy;
10267}
632da4d6 10268#endif
755735eb
EG
10269
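/*
 * Standalone sketch (not driver code): the sliding-window test used
 * by bnx2x_pkt_req_lin() above.  Any wnd_size consecutive fragments
 * must carry at least mss bytes between them, otherwise one MSS of
 * data could need more than MAX_FETCH_BD descriptors.  The driver
 * also counts the linear part as the first window element; that is
 * omitted here for brevity.
 */
static int demo_needs_linearize(const int *frag, int nfrags,
				int wnd_size, int mss)
{
	int sum = 0, i;

	for (i = 0; i < nfrags && i < wnd_size; i++)
		sum += frag[i];			/* first window */
	if (i == wnd_size && sum < mss)
		return 1;
	for (; i < nfrags; i++) {
		sum += frag[i] - frag[i - wnd_size];	/* slide by one */
		if (sum < mss)
			return 1;
	}
	return 0;
}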
10270/* called with netif_tx_lock
a2fbb9ea 10271 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 10272 * netif_wake_queue()
a2fbb9ea
ET
10273 */
10274static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10275{
10276 struct bnx2x *bp = netdev_priv(dev);
10277 struct bnx2x_fastpath *fp;
555f6c78 10278 struct netdev_queue *txq;
a2fbb9ea
ET
10279 struct sw_tx_bd *tx_buf;
10280 struct eth_tx_bd *tx_bd;
10281 struct eth_tx_parse_bd *pbd = NULL;
10282 u16 pkt_prod, bd_prod;
755735eb 10283 int nbd, fp_index;
a2fbb9ea 10284 dma_addr_t mapping;
755735eb
EG
10285 u32 xmit_type = bnx2x_xmit_type(bp, skb);
10286 int vlan_off = (bp->e1hov ? 4 : 0);
10287 int i;
10288 u8 hlen = 0;
a2fbb9ea
ET
10289
10290#ifdef BNX2X_STOP_ON_ERROR
10291 if (unlikely(bp->panic))
10292 return NETDEV_TX_BUSY;
10293#endif
10294
555f6c78
EG
10295 fp_index = skb_get_queue_mapping(skb);
10296 txq = netdev_get_tx_queue(dev, fp_index);
10297
a2fbb9ea 10298 fp = &bp->fp[fp_index];
755735eb 10299
231fd58a 10300 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
de832a55 10301		fp->eth_q_stats.driver_xoff++;
555f6c78 10302 netif_tx_stop_queue(txq);
a2fbb9ea
ET
10303 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10304 return NETDEV_TX_BUSY;
10305 }
10306
755735eb
EG
10307 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10308 " gso type %x xmit_type %x\n",
10309 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10310 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10311
632da4d6 10312#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
f5372251
EG
10313 /* First, check if we need to linearize the skb (due to FW
10314 restrictions). No need to check fragmentation if page size > 8K
10315	   (there will be no violation of FW restrictions) */
755735eb
EG
10316 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10317 /* Statistics of linearization */
10318 bp->lin_cnt++;
10319 if (skb_linearize(skb) != 0) {
10320 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10321 "silently dropping this SKB\n");
10322 dev_kfree_skb_any(skb);
da5a662a 10323 return NETDEV_TX_OK;
755735eb
EG
10324 }
10325 }
632da4d6 10326#endif
755735eb 10327
a2fbb9ea 10328 /*
755735eb 10329 Please read carefully. First we use one BD which we mark as start,
a2fbb9ea 10330 then for TSO or xsum we have a parsing info BD,
755735eb 10331 and only then we have the rest of the TSO BDs.
a2fbb9ea
ET
10332 (don't forget to mark the last one as last,
10333 and to unmap only AFTER you write to the BD ...)
755735eb 10334	   And above all, all pbd sizes are in words - NOT DWORDS!
a2fbb9ea
ET
10335 */
10336
10337 pkt_prod = fp->tx_pkt_prod++;
755735eb 10338 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 10339
755735eb 10340 /* get a tx_buf and first BD */
a2fbb9ea
ET
10341 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10342 tx_bd = &fp->tx_desc_ring[bd_prod];
10343
10344 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10345 tx_bd->general_data = (UNICAST_ADDRESS <<
10346 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
3196a88a
EG
10347 /* header nbd */
10348 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
a2fbb9ea 10349
755735eb
EG
10350 /* remember the first BD of the packet */
10351 tx_buf->first_bd = fp->tx_bd_prod;
10352 tx_buf->skb = skb;
a2fbb9ea
ET
10353
10354 DP(NETIF_MSG_TX_QUEUED,
10355 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10356 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10357
0c6671b0
EG
10358#ifdef BCM_VLAN
10359 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10360 (bp->flags & HW_VLAN_TX_FLAG)) {
755735eb
EG
10361 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10362 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10363 vlan_off += 4;
10364 } else
0c6671b0 10365#endif
755735eb 10366 tx_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 10367
755735eb 10368 if (xmit_type) {
755735eb 10369 /* turn on parsing and get a BD */
a2fbb9ea
ET
10370 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10371 pbd = (void *)&fp->tx_desc_ring[bd_prod];
755735eb
EG
10372
10373 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10374 }
10375
10376 if (xmit_type & XMIT_CSUM) {
10377 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
a2fbb9ea
ET
10378
10379 /* for now NS flag is not used in Linux */
4781bfad
EG
10380 pbd->global_data =
10381 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
10382 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 10383
755735eb
EG
10384 pbd->ip_hlen = (skb_transport_header(skb) -
10385 skb_network_header(skb)) / 2;
10386
10387 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 10388
755735eb
EG
10389 pbd->total_hlen = cpu_to_le16(hlen);
10390 hlen = hlen*2 - vlan_off;
a2fbb9ea 10391
755735eb
EG
10392 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10393
10394 if (xmit_type & XMIT_CSUM_V4)
a2fbb9ea 10395 tx_bd->bd_flags.as_bitfield |=
755735eb
EG
10396 ETH_TX_BD_FLAGS_IP_CSUM;
10397 else
10398 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10399
10400 if (xmit_type & XMIT_CSUM_TCP) {
10401 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10402
10403 } else {
10404 s8 fix = SKB_CS_OFF(skb); /* signed! */
10405
a2fbb9ea 10406 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
755735eb 10407 pbd->cs_offset = fix / 2;
a2fbb9ea 10408
755735eb
EG
10409 DP(NETIF_MSG_TX_QUEUED,
10410 "hlen %d offset %d fix %d csum before fix %x\n",
10411 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10412 SKB_CS(skb));
10413
10414 /* HW bug: fixup the CSUM */
10415 pbd->tcp_pseudo_csum =
10416 bnx2x_csum_fix(skb_transport_header(skb),
10417 SKB_CS(skb), fix);
10418
10419 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10420 pbd->tcp_pseudo_csum);
10421 }
a2fbb9ea
ET
10422 }
10423
10424 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 10425 skb_headlen(skb), PCI_DMA_TODEVICE);
a2fbb9ea
ET
10426
10427 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10428 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
6378c025 10429 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
a2fbb9ea
ET
10430 tx_bd->nbd = cpu_to_le16(nbd);
10431 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10432
10433 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
755735eb
EG
10434 " nbytes %d flags %x vlan %x\n",
10435 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10436 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10437 le16_to_cpu(tx_bd->vlan));
a2fbb9ea 10438
755735eb 10439 if (xmit_type & XMIT_GSO) {
a2fbb9ea
ET
10440
10441 DP(NETIF_MSG_TX_QUEUED,
10442 "TSO packet len %d hlen %d total len %d tso size %d\n",
10443 skb->len, hlen, skb_headlen(skb),
10444 skb_shinfo(skb)->gso_size);
10445
10446 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10447
755735eb
EG
10448 if (unlikely(skb_headlen(skb) > hlen))
10449 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10450 bd_prod, ++nbd);
a2fbb9ea
ET
10451
10452 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10453 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
755735eb
EG
10454 pbd->tcp_flags = pbd_tcp_flags(skb);
10455
10456 if (xmit_type & XMIT_GSO_V4) {
10457 pbd->ip_id = swab16(ip_hdr(skb)->id);
10458 pbd->tcp_pseudo_csum =
a2fbb9ea
ET
10459 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10460 ip_hdr(skb)->daddr,
10461 0, IPPROTO_TCP, 0));
755735eb
EG
10462
10463 } else
10464 pbd->tcp_pseudo_csum =
10465 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10466 &ipv6_hdr(skb)->daddr,
10467 0, IPPROTO_TCP, 0));
10468
a2fbb9ea
ET
10469 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10470 }
10471
755735eb
EG
10472 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10473 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 10474
755735eb
EG
10475 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10476 tx_bd = &fp->tx_desc_ring[bd_prod];
a2fbb9ea 10477
755735eb
EG
10478 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10479 frag->size, PCI_DMA_TODEVICE);
a2fbb9ea 10480
755735eb
EG
10481 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10482 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10483 tx_bd->nbytes = cpu_to_le16(frag->size);
10484 tx_bd->vlan = cpu_to_le16(pkt_prod);
10485 tx_bd->bd_flags.as_bitfield = 0;
a2fbb9ea 10486
755735eb
EG
10487 DP(NETIF_MSG_TX_QUEUED,
10488 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
10489 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10490 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
a2fbb9ea
ET
10491 }
10492
755735eb 10493 /* now at last mark the BD as the last BD */
a2fbb9ea
ET
10494 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10495
10496 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
10497 tx_bd, tx_bd->bd_flags.as_bitfield);
10498
a2fbb9ea
ET
10499 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10500
755735eb 10501 /* now send a tx doorbell, counting the next BD
a2fbb9ea
ET
10502 * if the packet contains or ends with it
10503 */
10504 if (TX_BD_POFF(bd_prod) < nbd)
10505 nbd++;
10506
10507 if (pbd)
10508 DP(NETIF_MSG_TX_QUEUED,
10509 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
10510 " tcp_flags %x xsum %x seq %u hlen %u\n",
10511 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10512 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 10513 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 10514
755735eb 10515 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 10516
58f4c4cf
EG
10517 /*
10518 * Make sure that the BD data is updated before updating the producer
10519 * since FW might read the BD right after the producer is updated.
10520 * This is only applicable for weak-ordered memory model archs such
10521	 * as IA-64. The following barrier is also mandatory since the FW
10522	 * assumes packets must have BDs.
10523 */
10524 wmb();
10525
4781bfad 10526 le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
a2fbb9ea 10527 mb(); /* FW restriction: must not reorder writing nbd and packets */
4781bfad 10528 le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
0626b899 10529 DOORBELL(bp, fp->index, 0);
a2fbb9ea
ET
10530
10531 mmiowb();
10532
755735eb 10533 fp->tx_bd_prod += nbd;
a2fbb9ea
ET
10534 dev->trans_start = jiffies;
10535
10536 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
58f4c4cf
EG
10537 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10538 if we put Tx into XOFF state. */
10539 smp_mb();
555f6c78 10540 netif_tx_stop_queue(txq);
de832a55 10541 fp->eth_q_stats.driver_xoff++;
a2fbb9ea 10542 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
555f6c78 10543 netif_tx_wake_queue(txq);
a2fbb9ea
ET
10544 }
10545 fp->tx_pkt++;
10546
10547 return NETDEV_TX_OK;
10548}
10549
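/*
 * Publish order used by the transmit path above:
 *   1. fill the BDs                 (normal stores)
 *   2. wmb()                        (BDs visible before producers)
 *   3. bds_prod += nbd
 *   4. mb()                         (FW rule: nbd before packets)
 *   5. packets_prod += 1, DOORBELL(), mmiowb()
 */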
bb2a0f7a 10550/* called with rtnl_lock */
a2fbb9ea
ET
10551static int bnx2x_open(struct net_device *dev)
10552{
10553 struct bnx2x *bp = netdev_priv(dev);
10554
6eccabb3
EG
10555 netif_carrier_off(dev);
10556
a2fbb9ea
ET
10557 bnx2x_set_power_state(bp, PCI_D0);
10558
bb2a0f7a 10559 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
10560}
10561
bb2a0f7a 10562/* called with rtnl_lock */
a2fbb9ea
ET
10563static int bnx2x_close(struct net_device *dev)
10564{
a2fbb9ea
ET
10565 struct bnx2x *bp = netdev_priv(dev);
10566
10567 /* Unload the driver, release IRQs */
bb2a0f7a
YG
10568 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10569 if (atomic_read(&bp->pdev->enable_cnt) == 1)
10570 if (!CHIP_REV_IS_SLOW(bp))
10571 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
10572
10573 return 0;
10574}
10575
f5372251 10576/* called with netif_tx_lock from dev_mcast.c */
34f80b04
EG
10577static void bnx2x_set_rx_mode(struct net_device *dev)
10578{
10579 struct bnx2x *bp = netdev_priv(dev);
10580 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10581 int port = BP_PORT(bp);
10582
10583 if (bp->state != BNX2X_STATE_OPEN) {
10584 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
10585 return;
10586 }
10587
10588 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
10589
10590 if (dev->flags & IFF_PROMISC)
10591 rx_mode = BNX2X_RX_MODE_PROMISC;
10592
10593 else if ((dev->flags & IFF_ALLMULTI) ||
10594 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
10595 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10596
10597 else { /* some multicasts */
10598 if (CHIP_IS_E1(bp)) {
10599 int i, old, offset;
10600 struct dev_mc_list *mclist;
10601 struct mac_configuration_cmd *config =
10602 bnx2x_sp(bp, mcast_config);
10603
10604 for (i = 0, mclist = dev->mc_list;
10605 mclist && (i < dev->mc_count);
10606 i++, mclist = mclist->next) {
10607
10608 config->config_table[i].
10609 cam_entry.msb_mac_addr =
10610 swab16(*(u16 *)&mclist->dmi_addr[0]);
10611 config->config_table[i].
10612 cam_entry.middle_mac_addr =
10613 swab16(*(u16 *)&mclist->dmi_addr[2]);
10614 config->config_table[i].
10615 cam_entry.lsb_mac_addr =
10616 swab16(*(u16 *)&mclist->dmi_addr[4]);
10617 config->config_table[i].cam_entry.flags =
10618 cpu_to_le16(port);
10619 config->config_table[i].
10620 target_table_entry.flags = 0;
10621 config->config_table[i].
10622 target_table_entry.client_id = 0;
10623 config->config_table[i].
10624 target_table_entry.vlan_id = 0;
10625
10626 DP(NETIF_MSG_IFUP,
10627 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10628 config->config_table[i].
10629 cam_entry.msb_mac_addr,
10630 config->config_table[i].
10631 cam_entry.middle_mac_addr,
10632 config->config_table[i].
10633 cam_entry.lsb_mac_addr);
10634 }
8d9c5f34 10635 old = config->hdr.length;
34f80b04
EG
10636 if (old > i) {
10637 for (; i < old; i++) {
10638 if (CAM_IS_INVALID(config->
10639 config_table[i])) {
af246401 10640 /* already invalidated */
34f80b04
EG
10641 break;
10642 }
10643 /* invalidate */
10644 CAM_INVALIDATE(config->
10645 config_table[i]);
10646 }
10647 }
10648
10649 if (CHIP_REV_IS_SLOW(bp))
10650 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
10651 else
10652 offset = BNX2X_MAX_MULTICAST*(1 + port);
10653
8d9c5f34 10654 config->hdr.length = i;
34f80b04 10655 config->hdr.offset = offset;
8d9c5f34 10656 config->hdr.client_id = bp->fp->cl_id;
34f80b04
EG
10657 config->hdr.reserved1 = 0;
10658
10659 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10660 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10661 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
10662 0);
10663 } else { /* E1H */
10664 /* Accept one or more multicasts */
10665 struct dev_mc_list *mclist;
10666 u32 mc_filter[MC_HASH_SIZE];
10667 u32 crc, bit, regidx;
10668 int i;
10669
10670 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
10671
10672 for (i = 0, mclist = dev->mc_list;
10673 mclist && (i < dev->mc_count);
10674 i++, mclist = mclist->next) {
10675
7c510e4b
JB
10676 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
10677 mclist->dmi_addr);
34f80b04
EG
10678
10679 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
10680 bit = (crc >> 24) & 0xff;
10681 regidx = bit >> 5;
10682 bit &= 0x1f;
10683 mc_filter[regidx] |= (1 << bit);
10684 }
10685
10686 for (i = 0; i < MC_HASH_SIZE; i++)
10687 REG_WR(bp, MC_HASH_OFFSET(bp, i),
10688 mc_filter[i]);
10689 }
10690 }
10691
10692 bp->rx_mode = rx_mode;
10693 bnx2x_set_storm_rx_mode(bp);
10694}
10695
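/*
 * Standalone sketch (not driver code): how the E1H branch above picks
 * a multicast filter bit.  A bitwise CRC32C (Castagnoli, reflected
 * polynomial 0x82F63B78) stands in for the kernel's crc32c_le(); the
 * in-kernel helper's seed/inversion conventions may differ, so treat
 * the exact numeric output as illustrative only.
 */
static unsigned int demo_crc32c(const unsigned char *p, int len)
{
	unsigned int crc = ~0u;
	int i;

	while (len--) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0x82F63B78u & (0u - (crc & 1)));
	}
	return ~crc;
}

static void demo_mc_bit(const unsigned char mac[6],
			unsigned int *regidx, unsigned int *bit)
{
	unsigned int b = (demo_crc32c(mac, 6) >> 24) & 0xff;

	*regidx = b >> 5;	/* which 32-bit hash register (0..7) */
	*bit = b & 0x1f;	/* which bit inside that register */
}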
10696/* called with rtnl_lock */
a2fbb9ea
ET
10697static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10698{
10699 struct sockaddr *addr = p;
10700 struct bnx2x *bp = netdev_priv(dev);
10701
34f80b04 10702 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
a2fbb9ea
ET
10703 return -EINVAL;
10704
10705 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
34f80b04
EG
10706 if (netif_running(dev)) {
10707 if (CHIP_IS_E1(bp))
3101c2bc 10708 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 10709 else
3101c2bc 10710 bnx2x_set_mac_addr_e1h(bp, 1);
34f80b04 10711 }
a2fbb9ea
ET
10712
10713 return 0;
10714}
10715
c18487ee 10716/* called with rtnl_lock */
a2fbb9ea
ET
10717static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10718{
10719 struct mii_ioctl_data *data = if_mii(ifr);
10720 struct bnx2x *bp = netdev_priv(dev);
3196a88a 10721 int port = BP_PORT(bp);
a2fbb9ea
ET
10722 int err;
10723
10724 switch (cmd) {
10725 case SIOCGMIIPHY:
34f80b04 10726 data->phy_id = bp->port.phy_addr;
a2fbb9ea 10727
c14423fe 10728 /* fallthrough */
c18487ee 10729
a2fbb9ea 10730 case SIOCGMIIREG: {
c18487ee 10731 u16 mii_regval;
a2fbb9ea 10732
c18487ee
YR
10733 if (!netif_running(dev))
10734 return -EAGAIN;
a2fbb9ea 10735
34f80b04 10736 mutex_lock(&bp->port.phy_mutex);
3196a88a 10737 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
c18487ee
YR
10738 DEFAULT_PHY_DEV_ADDR,
10739 (data->reg_num & 0x1f), &mii_regval);
10740 data->val_out = mii_regval;
34f80b04 10741 mutex_unlock(&bp->port.phy_mutex);
a2fbb9ea
ET
10742 return err;
10743 }
10744
10745 case SIOCSMIIREG:
10746 if (!capable(CAP_NET_ADMIN))
10747 return -EPERM;
10748
c18487ee
YR
10749 if (!netif_running(dev))
10750 return -EAGAIN;
10751
34f80b04 10752 mutex_lock(&bp->port.phy_mutex);
3196a88a 10753 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
c18487ee
YR
10754 DEFAULT_PHY_DEV_ADDR,
10755 (data->reg_num & 0x1f), data->val_in);
34f80b04 10756 mutex_unlock(&bp->port.phy_mutex);
a2fbb9ea
ET
10757 return err;
10758
10759 default:
10760 /* do nothing */
10761 break;
10762 }
10763
10764 return -EOPNOTSUPP;
10765}
10766
34f80b04 10767/* called with rtnl_lock */
a2fbb9ea
ET
10768static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10769{
10770 struct bnx2x *bp = netdev_priv(dev);
34f80b04 10771 int rc = 0;
a2fbb9ea
ET
10772
10773 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10774 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10775 return -EINVAL;
10776
10777 /* This does not race with packet allocation
c14423fe 10778 * because the actual alloc size is
a2fbb9ea
ET
10779 * only updated as part of load
10780 */
10781 dev->mtu = new_mtu;
10782
10783 if (netif_running(dev)) {
34f80b04
EG
10784 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10785 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 10786 }
34f80b04
EG
10787
10788 return rc;
a2fbb9ea
ET
10789}
10790
10791static void bnx2x_tx_timeout(struct net_device *dev)
10792{
10793 struct bnx2x *bp = netdev_priv(dev);
10794
10795#ifdef BNX2X_STOP_ON_ERROR
10796 if (!bp->panic)
10797 bnx2x_panic();
10798#endif
10799	/* This allows the netif to be shut down gracefully before resetting */
10800 schedule_work(&bp->reset_task);
10801}
10802
10803#ifdef BCM_VLAN
34f80b04 10804/* called with rtnl_lock */
a2fbb9ea
ET
10805static void bnx2x_vlan_rx_register(struct net_device *dev,
10806 struct vlan_group *vlgrp)
10807{
10808 struct bnx2x *bp = netdev_priv(dev);
10809
10810 bp->vlgrp = vlgrp;
0c6671b0
EG
10811
10812 /* Set flags according to the required capabilities */
10813 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10814
10815 if (dev->features & NETIF_F_HW_VLAN_TX)
10816 bp->flags |= HW_VLAN_TX_FLAG;
10817
10818 if (dev->features & NETIF_F_HW_VLAN_RX)
10819 bp->flags |= HW_VLAN_RX_FLAG;
10820
a2fbb9ea 10821 if (netif_running(dev))
49d66772 10822 bnx2x_set_client_config(bp);
a2fbb9ea 10823}
34f80b04 10824
a2fbb9ea
ET
10825#endif
10826
10827#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10828static void poll_bnx2x(struct net_device *dev)
10829{
10830 struct bnx2x *bp = netdev_priv(dev);
10831
10832 disable_irq(bp->pdev->irq);
10833 bnx2x_interrupt(bp->pdev->irq, dev);
10834 enable_irq(bp->pdev->irq);
10835}
10836#endif
10837
c64213cd
SH
10838static const struct net_device_ops bnx2x_netdev_ops = {
10839 .ndo_open = bnx2x_open,
10840 .ndo_stop = bnx2x_close,
10841 .ndo_start_xmit = bnx2x_start_xmit,
356e2385 10842 .ndo_set_multicast_list = bnx2x_set_rx_mode,
c64213cd
SH
10843 .ndo_set_mac_address = bnx2x_change_mac_addr,
10844 .ndo_validate_addr = eth_validate_addr,
10845 .ndo_do_ioctl = bnx2x_ioctl,
10846 .ndo_change_mtu = bnx2x_change_mtu,
10847 .ndo_tx_timeout = bnx2x_tx_timeout,
10848#ifdef BCM_VLAN
10849 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
10850#endif
10851#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10852 .ndo_poll_controller = poll_bnx2x,
10853#endif
10854};
10855
34f80b04
EG
10856static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10857 struct net_device *dev)
a2fbb9ea
ET
10858{
10859 struct bnx2x *bp;
10860 int rc;
10861
10862 SET_NETDEV_DEV(dev, &pdev->dev);
10863 bp = netdev_priv(dev);
10864
34f80b04
EG
10865 bp->dev = dev;
10866 bp->pdev = pdev;
a2fbb9ea 10867 bp->flags = 0;
34f80b04 10868 bp->func = PCI_FUNC(pdev->devfn);
a2fbb9ea
ET
10869
10870 rc = pci_enable_device(pdev);
10871 if (rc) {
10872 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10873 goto err_out;
10874 }
10875
10876 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10877 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10878 " aborting\n");
10879 rc = -ENODEV;
10880 goto err_out_disable;
10881 }
10882
10883 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10884 printk(KERN_ERR PFX "Cannot find second PCI device"
10885 " base address, aborting\n");
10886 rc = -ENODEV;
10887 goto err_out_disable;
10888 }
10889
34f80b04
EG
10890 if (atomic_read(&pdev->enable_cnt) == 1) {
10891 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10892 if (rc) {
10893 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10894 " aborting\n");
10895 goto err_out_disable;
10896 }
a2fbb9ea 10897
34f80b04
EG
10898 pci_set_master(pdev);
10899 pci_save_state(pdev);
10900 }
a2fbb9ea
ET
10901
10902 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10903 if (bp->pm_cap == 0) {
10904 printk(KERN_ERR PFX "Cannot find power management"
10905 " capability, aborting\n");
10906 rc = -EIO;
10907 goto err_out_release;
10908 }
10909
10910 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10911 if (bp->pcie_cap == 0) {
10912 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10913 " aborting\n");
10914 rc = -EIO;
10915 goto err_out_release;
10916 }
10917
10918 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10919 bp->flags |= USING_DAC_FLAG;
10920 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10921 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10922 " failed, aborting\n");
10923 rc = -EIO;
10924 goto err_out_release;
10925 }
10926
10927 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10928 printk(KERN_ERR PFX "System does not support DMA,"
10929 " aborting\n");
10930 rc = -EIO;
10931 goto err_out_release;
10932 }
10933
34f80b04
EG
10934 dev->mem_start = pci_resource_start(pdev, 0);
10935 dev->base_addr = dev->mem_start;
10936 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
10937
10938 dev->irq = pdev->irq;
10939
275f165f 10940 bp->regview = pci_ioremap_bar(pdev, 0);
a2fbb9ea
ET
10941 if (!bp->regview) {
10942 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10943 rc = -ENOMEM;
10944 goto err_out_release;
10945 }
10946
34f80b04
EG
10947 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10948 min_t(u64, BNX2X_DB_SIZE,
10949 pci_resource_len(pdev, 2)));
a2fbb9ea
ET
10950 if (!bp->doorbells) {
10951 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10952 rc = -ENOMEM;
10953 goto err_out_unmap;
10954 }
10955
10956 bnx2x_set_power_state(bp, PCI_D0);
10957
34f80b04
EG
10958 /* clean indirect addresses */
10959 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10960 PCICFG_VENDOR_ID_OFFSET);
10961 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10962 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10963 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10964 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 10965
34f80b04 10966 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 10967
c64213cd 10968 dev->netdev_ops = &bnx2x_netdev_ops;
34f80b04 10969 dev->ethtool_ops = &bnx2x_ethtool_ops;
34f80b04
EG
10970 dev->features |= NETIF_F_SG;
10971 dev->features |= NETIF_F_HW_CSUM;
10972 if (bp->flags & USING_DAC_FLAG)
10973 dev->features |= NETIF_F_HIGHDMA;
10974#ifdef BCM_VLAN
10975 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
0c6671b0 10976 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
34f80b04
EG
10977#endif
10978 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb 10979 dev->features |= NETIF_F_TSO6;
a2fbb9ea
ET
10980
10981 return 0;
10982
10983err_out_unmap:
10984 if (bp->regview) {
10985 iounmap(bp->regview);
10986 bp->regview = NULL;
10987 }
a2fbb9ea
ET
10988 if (bp->doorbells) {
10989 iounmap(bp->doorbells);
10990 bp->doorbells = NULL;
10991 }
10992
10993err_out_release:
34f80b04
EG
10994 if (atomic_read(&pdev->enable_cnt) == 1)
10995 pci_release_regions(pdev);
a2fbb9ea
ET
10996
10997err_out_disable:
10998 pci_disable_device(pdev);
10999 pci_set_drvdata(pdev, NULL);
11000
11001err_out:
11002 return rc;
11003}
11004
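/*
 * Resource acquisition above follows the usual PCI probe pattern:
 * enable device -> request regions (first enabler only) -> find PM
 * and PCIe capabilities -> set DMA mask (64-bit with 32-bit fallback)
 * -> ioremap BAR0 (registers) and BAR2 (doorbells), with the
 * err_out_* labels unwinding in exact reverse order on failure.
 */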
25047950
ET
11005static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
11006{
11007 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11008
11009 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11010 return val;
11011}
11012
11013/* return value: 1=2.5GHz, 2=5GHz */
11014static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
11015{
11016 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11017
11018 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11019 return val;
11020}
11021
a2fbb9ea
ET
11022static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11023 const struct pci_device_id *ent)
11024{
11025 static int version_printed;
11026 struct net_device *dev = NULL;
11027 struct bnx2x *bp;
25047950 11028 int rc;
a2fbb9ea
ET
11029
11030 if (version_printed++ == 0)
11031 printk(KERN_INFO "%s", version);
11032
11033	/* dev zeroed in alloc_etherdev_mq() */
555f6c78 11034 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
34f80b04
EG
11035 if (!dev) {
11036 printk(KERN_ERR PFX "Cannot allocate net device\n");
a2fbb9ea 11037 return -ENOMEM;
34f80b04 11038 }
a2fbb9ea 11039
a2fbb9ea
ET
11040 bp = netdev_priv(dev);
11041 bp->msglevel = debug;
11042
34f80b04 11043 rc = bnx2x_init_dev(pdev, dev);
a2fbb9ea
ET
11044 if (rc < 0) {
11045 free_netdev(dev);
11046 return rc;
11047 }
11048
a2fbb9ea
ET
11049 pci_set_drvdata(pdev, dev);
11050
34f80b04 11051 rc = bnx2x_init_bp(bp);
693fc0d1
EG
11052 if (rc)
11053 goto init_one_exit;
11054
11055 rc = register_netdev(dev);
34f80b04 11056 if (rc) {
693fc0d1 11057 dev_err(&pdev->dev, "Cannot register net device\n");
34f80b04
EG
11058 goto init_one_exit;
11059 }
11060
25047950 11061 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
87942b46 11062 " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
34f80b04 11063 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
25047950
ET
11064 bnx2x_get_pcie_width(bp),
11065 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
11066 dev->base_addr, bp->pdev->irq);
e174961c 11067 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
a2fbb9ea 11068 return 0;
34f80b04
EG
11069
11070init_one_exit:
11071 if (bp->regview)
11072 iounmap(bp->regview);
11073
11074 if (bp->doorbells)
11075 iounmap(bp->doorbells);
11076
11077 free_netdev(dev);
11078
11079 if (atomic_read(&pdev->enable_cnt) == 1)
11080 pci_release_regions(pdev);
11081
11082 pci_disable_device(pdev);
11083 pci_set_drvdata(pdev, NULL);
11084
11085 return rc;
a2fbb9ea
ET
11086}
11087
11088static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
11089{
11090 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
11091 struct bnx2x *bp;
11092
11093 if (!dev) {
228241eb
ET
11094 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11095 return;
11096 }
228241eb 11097 bp = netdev_priv(dev);
a2fbb9ea 11098
a2fbb9ea
ET
11099 unregister_netdev(dev);
11100
11101 if (bp->regview)
11102 iounmap(bp->regview);
11103
11104 if (bp->doorbells)
11105 iounmap(bp->doorbells);
11106
11107 free_netdev(dev);
34f80b04
EG
11108
11109 if (atomic_read(&pdev->enable_cnt) == 1)
11110 pci_release_regions(pdev);
11111
a2fbb9ea
ET
11112 pci_disable_device(pdev);
11113 pci_set_drvdata(pdev, NULL);
11114}
11115
11116static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
11117{
11118 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
11119 struct bnx2x *bp;
11120
34f80b04
EG
11121 if (!dev) {
11122 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11123 return -ENODEV;
11124 }
11125 bp = netdev_priv(dev);
a2fbb9ea 11126
34f80b04 11127 rtnl_lock();
a2fbb9ea 11128
34f80b04 11129 pci_save_state(pdev);
228241eb 11130
34f80b04
EG
11131 if (!netif_running(dev)) {
11132 rtnl_unlock();
11133 return 0;
11134 }
a2fbb9ea
ET
11135
11136 netif_device_detach(dev);
a2fbb9ea 11137
da5a662a 11138 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
34f80b04 11139
a2fbb9ea 11140 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
228241eb 11141
34f80b04
EG
11142 rtnl_unlock();
11143
a2fbb9ea
ET
11144 return 0;
11145}
11146
11147static int bnx2x_resume(struct pci_dev *pdev)
11148{
11149 struct net_device *dev = pci_get_drvdata(pdev);
228241eb 11150 struct bnx2x *bp;
a2fbb9ea
ET
11151 int rc;
11152
228241eb
ET
11153 if (!dev) {
11154 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11155 return -ENODEV;
11156 }
228241eb 11157 bp = netdev_priv(dev);
a2fbb9ea 11158
34f80b04
EG
11159 rtnl_lock();
11160
228241eb 11161 pci_restore_state(pdev);
34f80b04
EG
11162
11163 if (!netif_running(dev)) {
11164 rtnl_unlock();
11165 return 0;
11166 }
11167
a2fbb9ea
ET
11168 bnx2x_set_power_state(bp, PCI_D0);
11169 netif_device_attach(dev);
11170
da5a662a 11171 rc = bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea 11172
34f80b04
EG
11173 rtnl_unlock();
11174
11175 return rc;
a2fbb9ea
ET
11176}
11177
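/*
 * Suspend/resume pairing: both paths run under rtnl_lock so they
 * cannot race ndo_open/ndo_close; suspend saves config space and
 * unloads with UNLOAD_CLOSE, resume restores config space and
 * reloads with LOAD_OPEN, and both are no-ops beyond the PCI state
 * handling when the interface is down.
 */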
f8ef6e44
YG
11178static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
11179{
11180 int i;
11181
11182 bp->state = BNX2X_STATE_ERROR;
11183
11184 bp->rx_mode = BNX2X_RX_MODE_NONE;
11185
11186 bnx2x_netif_stop(bp, 0);
11187
11188 del_timer_sync(&bp->timer);
11189 bp->stats_state = STATS_STATE_DISABLED;
11190 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
11191
11192 /* Release IRQs */
11193 bnx2x_free_irq(bp);
11194
11195 if (CHIP_IS_E1(bp)) {
11196 struct mac_configuration_cmd *config =
11197 bnx2x_sp(bp, mcast_config);
11198
8d9c5f34 11199 for (i = 0; i < config->hdr.length; i++)
f8ef6e44
YG
11200 CAM_INVALIDATE(config->config_table[i]);
11201 }
11202
11203 /* Free SKBs, SGEs, TPA pool and driver internals */
11204 bnx2x_free_skbs(bp);
555f6c78 11205 for_each_rx_queue(bp, i)
f8ef6e44 11206 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 11207 for_each_rx_queue(bp, i)
7cde1c8b 11208 netif_napi_del(&bnx2x_fp(bp, i, napi));
f8ef6e44
YG
11209 bnx2x_free_mem(bp);
11210
11211 bp->state = BNX2X_STATE_CLOSED;
11212
11213 netif_carrier_off(bp->dev);
11214
11215 return 0;
11216}
11217
11218static void bnx2x_eeh_recover(struct bnx2x *bp)
11219{
11220 u32 val;
11221
11222 mutex_init(&bp->port.phy_mutex);
11223
11224 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
11225 bp->link_params.shmem_base = bp->common.shmem_base;
11226 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
11227
11228 if (!bp->common.shmem_base ||
11229 (bp->common.shmem_base < 0xA0000) ||
11230 (bp->common.shmem_base >= 0xC0000)) {
11231 BNX2X_DEV_INFO("MCP not active\n");
11232 bp->flags |= NO_MCP_FLAG;
11233 return;
11234 }
11235
11236 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
11237 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11238 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11239 BNX2X_ERR("BAD MCP validity signature\n");
11240
11241 if (!BP_NOMCP(bp)) {
11242 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
11243 & DRV_MSG_SEQ_NUMBER_MASK);
11244 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
11245 }
11246}
11247
493adb1f
WX
11248/**
11249 * bnx2x_io_error_detected - called when PCI error is detected
11250 * @pdev: Pointer to PCI device
11251 * @state: The current pci connection state
11252 *
11253 * This function is called after a PCI bus error affecting
11254 * this device has been detected.
11255 */
11256static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
11257 pci_channel_state_t state)
11258{
11259 struct net_device *dev = pci_get_drvdata(pdev);
11260 struct bnx2x *bp = netdev_priv(dev);
11261
11262 rtnl_lock();
11263
11264 netif_device_detach(dev);
11265
11266 if (netif_running(dev))
f8ef6e44 11267 bnx2x_eeh_nic_unload(bp);
493adb1f
WX
11268
11269 pci_disable_device(pdev);
11270
11271 rtnl_unlock();
11272
11273 /* Request a slot reset */
11274 return PCI_ERS_RESULT_NEED_RESET;
11275}
11276
11277/**
11278 * bnx2x_io_slot_reset - called after the PCI bus has been reset
11279 * @pdev: Pointer to PCI device
11280 *
11281 * Restart the card from scratch, as if from a cold-boot.
11282 */
11283static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
11284{
11285 struct net_device *dev = pci_get_drvdata(pdev);
11286 struct bnx2x *bp = netdev_priv(dev);
11287
11288 rtnl_lock();
11289
11290 if (pci_enable_device(pdev)) {
11291 dev_err(&pdev->dev,
11292 "Cannot re-enable PCI device after reset\n");
11293 rtnl_unlock();
11294 return PCI_ERS_RESULT_DISCONNECT;
11295 }
11296
11297 pci_set_master(pdev);
11298 pci_restore_state(pdev);
11299
11300 if (netif_running(dev))
11301 bnx2x_set_power_state(bp, PCI_D0);
11302
11303 rtnl_unlock();
11304
11305 return PCI_ERS_RESULT_RECOVERED;
11306}
11307
11308/**
11309 * bnx2x_io_resume - called when traffic can start flowing again
11310 * @pdev: Pointer to PCI device
11311 *
11312 * This callback is called when the error recovery driver tells us that
11313 * its OK to resume normal operation.
11314 */
11315static void bnx2x_io_resume(struct pci_dev *pdev)
11316{
11317 struct net_device *dev = pci_get_drvdata(pdev);
11318 struct bnx2x *bp = netdev_priv(dev);
11319
11320 rtnl_lock();
11321
f8ef6e44
YG
11322 bnx2x_eeh_recover(bp);
11323
493adb1f 11324 if (netif_running(dev))
f8ef6e44 11325 bnx2x_nic_load(bp, LOAD_NORMAL);
493adb1f
WX
11326
11327 netif_device_attach(dev);
11328
11329 rtnl_unlock();
11330}
11331
11332static struct pci_error_handlers bnx2x_err_handler = {
11333 .error_detected = bnx2x_io_error_detected,
356e2385
EG
11334 .slot_reset = bnx2x_io_slot_reset,
11335 .resume = bnx2x_io_resume,
493adb1f
WX
11336};
11337
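/*
 * PCI error recovery (EEH/AER) sequence for the handlers above: the
 * core calls .error_detected() first (detach and unload, then request
 * a slot reset), .slot_reset() once the link has been reset (re-enable
 * the device and restore config space), and .resume() when traffic may
 * flow again (recover shmem state and reload the NIC).
 */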
a2fbb9ea 11338static struct pci_driver bnx2x_pci_driver = {
493adb1f
WX
11339 .name = DRV_MODULE_NAME,
11340 .id_table = bnx2x_pci_tbl,
11341 .probe = bnx2x_init_one,
11342 .remove = __devexit_p(bnx2x_remove_one),
11343 .suspend = bnx2x_suspend,
11344 .resume = bnx2x_resume,
11345 .err_handler = &bnx2x_err_handler,
a2fbb9ea
ET
11346};
11347
11348static int __init bnx2x_init(void)
11349{
1cf167f2
EG
11350 bnx2x_wq = create_singlethread_workqueue("bnx2x");
11351 if (bnx2x_wq == NULL) {
11352 printk(KERN_ERR PFX "Cannot create workqueue\n");
11353 return -ENOMEM;
11354 }
11355
a2fbb9ea
ET
11356 return pci_register_driver(&bnx2x_pci_driver);
11357}
11358
11359static void __exit bnx2x_cleanup(void)
11360{
11361 pci_unregister_driver(&bnx2x_pci_driver);
1cf167f2
EG
11362
11363 destroy_workqueue(bnx2x_wq);
a2fbb9ea
ET
11364}
11365
11366module_init(bnx2x_init);
11367module_exit(bnx2x_cleanup);
11368